diff --git a/Downscaling.R b/Downscaling.R
new file mode 100644
index 0000000000000000000000000000000000000000..adb5acae7c053d3c44661859c70de7768693482a
--- /dev/null
+++ b/Downscaling.R
@@ -0,0 +1,193 @@
+#########################################################################################
+#                           Downscaling with ERA-Interim                                #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript Downscaling.R 1984
+#
+# where 1984 is the year whose WTs you want to classify;
+# this way you can run up to 8 jobs at the same time, each one producing its own output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript Downscaling.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, one after the other.
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript Downscaling.R $y & done
+#
+# but that would need one processor per year (35 of them)! In practice, it is
+# possible to run only 4-8 years at a time.
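+#
+# A hedged alternative (not in the original script), assuming GNU parallel is
+# available on the machine: it caps the number of concurrent jobs instead of
+# launching one process per year all at once:
+#
+# parallel -j 8 Rscript Downscaling.R ::: {1980..2014}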
+
+library(s2dverification)  # for the function Load()
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')  # for the calendar functions
+Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf'  # Load() configuration file path
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/SD"  # working dir where to put the output maps and files
+subdatadir="/scratch/Earth/ncortesi/RESILIENCE/SD_subdata"  # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean='ERAintDailyHighRes'  #'ERAintDailyLowRes'  # daily reanalysis dataset used for MSLP data
+
+year.start=1980  # starting year of the MSLP daily data (from the 1st of January)
+year.end=2014    # ending year of the MSLP daily data (up to the 31st of December)
+
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+
+n.lat <- length(MSLP$lat)  # number of latitude values
+n.lon <- length(MSLP$lon)  # number of longitude values
+
+lon.pos <- all(MSLP$lon >= 0)  # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not
+
+# for each grid point of the reanalysis and each day to downscale, find the analogue days;
+# we exclude only central points > +77 degrees lat and < -77 deg.
+n.lat.unused.poles <- 20  # number of unused latitude values near each of the two poles (must exclude the last 10 degrees N/S, so it depends on the spatial res. of the reanalysis)
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)]  # latitude values used as central points
+psl.res <- diff(lat.used)[1]  # psl grid resolution
+lat.used <- round(lat.used,3)  # round lat values to the third decimal, to save them in a file with a short file name (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3)  # longitude values used as central points, rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# load all MSLP data, one year at a time:
+for(y in year.start:year.end){
+  print(paste0("Year: ",y))
+  n.days <- n.days.in.a.year(y)
+
+  MSLP <- Load('psl', NULL, mslp.rean, paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', configfile = Load.path)
+  gc()
+
+  years.crossv <- c(year.start:year.end)
+  ss <- which(years.crossv==y)  # (the original referenced 'y1', which does not exist; 'y' is meant)
+  years.crossv <- years.crossv[-ss]  # remove all the potential analogue days belonging to the year of the day to downscale
+
+  # one distance matrix per candidate year ('dist.days' is an illustrative name:
+  # the original computed the squared differences below but discarded the result)
+  dist.days <- list()
+
+  for(y2 in years.crossv){
+    n.days2 <- n.days.in.a.year(y2)
+
+    MSLP2 <- Load('psl', NULL, mslp.rean, paste0(y2,'0101'), storefreq = 'daily', leadtimemax = n.days2, output = 'lonlat', configfile = Load.path)
+    gc()
+
+    dist.days[[as.character(y2)]] <- matrix(NA, n.days, n.days2)
+
+    for(day1 in 1:n.days){   # (the original looped over 'n.days1' with 'MSLP1', neither of which exists)
+      for(day2 in 1:n.days2){
+        # squared Euclidean distance between the two daily MSLP fields
+        # (the original evaluated this expression twice and never stored it):
+        dist.days[[as.character(y2)]][day1,day2] <- sum((MSLP$obs[1,1,1,day1,,] - MSLP2$obs[1,1,1,day2,,])^2)
+      }
+    } # close for on day1 (this brace was missing in the original)
+  } # close for on y2 (this brace was missing in the original)
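+
+  # --- Hedged sketch (not in the original script) ---------------------------------
+  # Once the distance matrices are filled, the analogue step would pick, for each
+  # day of year y, the closest candidate days. 'n.analogs' and 'analogs' are
+  # hypothetical names for illustration; the original selection code is missing here.
+  # n.analogs <- 10
+  # all.dist  <- do.call(cbind, dist.days)  # n.days x (all candidate days)
+  # analogs   <- t(apply(all.dist, 1, function(d) order(d)[1:n.analogs]))
+  # ---------------------------------------------------------------------------------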
+
+  # create subdirs where to put the yearly output (there are too many files to put them in only 1 dir):
+  if(!dir.exists(file.path(workdir,paste0(y,"_Rdata")))) dir.create(file.path(workdir,paste0(y,"_Rdata")))
+  if(!dir.exists(file.path(workdir,paste0(y,"_txt")))) dir.create(file.path(workdir,paste0(y,"_txt")))  # needed by the write.table() call below
+
+  # loop over the lat/lon of the central point of the Lamb classification:
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1]  # for debugging
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+      cat(paste0("Grid point saved: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r")
+
+      # mean psl at each of the 16 grid points of the classification, for each day of the year
+      # (the original allocated 9 columns but looped over 16 points; 16 matches the loop below):
+      psl <- array(NA,c(n.days,16))
+
+      for(p in 1:16) {
+
+        pos.lon.low.res <- (pos.lon[p]-1):(pos.lon[p]+1)  # ('radiu1' in the original was presumably a typo for 1, mirroring the latitude line below)
+
+        if(head(pos.lon.low.res,1) <= 0){
+          ss <- which(pos.lon.low.res <= 0)
+          pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+        }
+        if(tail(pos.lon.low.res,1) > n.lon.used){
+          ss <- which(pos.lon.low.res > n.lon.used)
+          pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+        }
+
+        pos.lat.low.res <- (pos.lat[p]-1):(pos.lat[p]+1)  # it never has to be corrected because we excluded the poles
+
+        MSLP.low.res <- MSLP$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]  # (the original referenced 'MSLP.year', an object never defined in this script)
+        psl[,p] <- apply(MSLP.low.res, 1, mean)
+      }
+
+      # create a data frame with all the info about the WT classification:
+
+      #years.period <- months.period <- days.period <- c()
+      #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+      #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+      #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+      years.period <- rep(y, n.days)
+      months.period <- seq.months.in.a.year(y)[1:n.days]
+      days.period <- seq.days.in.a.year(y)[1:n.days]
+      WT <- data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+      WT$Year<-as.numeric(WT$Year); WT$Month<-as.numeric(WT$Month); WT$Day<-as.numeric(WT$Day); WT$Direction<-as.numeric(WT$Direction); WT$WT.dir<-as.character(WT$WT.dir)
+
+      # column identifying which days are of hybrid type and which of directional type:
+      WT$Hyb <- rep("Directional",length(D2))
+      WT$Hyb[cyc.pure] <- "Pure_C"
+      WT$Hyb[anticyc.pure] <- "Pure_A"
+      WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+      WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+
+      # column with the list of the 26 official WTs:
+      WT$WT <- WT$WT.dir  # (the original read WT$D2.dir, a column that does not exist in WT)
+      WT$WT[cyc.pure] <- "C"
+      WT$WT[anticyc.pure] <- "A"
+      WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+      WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+      #WT$WT[indeter] <- "U"  # comment this line not to include the Unclassified U WT
+
+      WT$WT.num <- match(WT$WT,WTs.type)  # add a column converting the 26 weather types to numbers from 1 to 26
+
+      # column with the list of the 10 WTs obtained by merging the eight pure directional WTs with C and A;
+      # each hybrid WT is added to its corresponding directional type (e.g., C.SW is added to SW, etc.):
+      WT$WT10 <- WT$WT.dir
+      WT$WT10[cyc.pure] <- "C"
+      WT$WT10[anticyc.pure] <- "A"
+      WT$WT10.num <- match(WT$WT10,WTs.type)  # conversion of the 10 weather types to numbers 1 to 9, plus type A, which converts to number 18
+
+      # add the other geostrophic indices:
+      WT$SF=round(SF,5)
+      WT$WF=round(WF,5)
+      WT$F=round(F,5)
+      WT$ZS=round(ZS,5)
+      WT$ZW=round(ZW,5)
+      WT$Z=round(Z,5)
+
+      #WTs<-rbind(WTs,WT)
+
+      # save the full data table as a .txt to exchange it with other people:
+      write.table(WT,file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+      # save only the classification with 10 WTs, to have a smaller (binary) file:
+      #WTs<-cbind(WT$Year, WT$Month, WT$Day, WT$WT10.num)
+      WTs <- WT$WT10.num
+      save(WTs, file=paste0(workdir,"/",y,"_Rdata/10WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".RData"), compress=FALSE)
+
+      rm(WT,SF,WF,F,D2,ZS,ZW,Z)  # (the original removed 'D' and 'W', two objects never created above)
+
+    } # close for on lonc
+  } # close for on latc
+  cat("\n")
+
+} # close for on y
+
+
diff --git a/ReliabilityDiagramHist.R b/ReliabilityDiagramHist.R
new file mode 100644
index 0000000000000000000000000000000000000000..7f05a095e3e75aa514242ef5d31c08ca0cb90a2b
--- /dev/null
+++ b/ReliabilityDiagramHist.R
@@ -0,0 +1,223 @@
+######################################################################
+#                                                                    #
+#  RELIABILITY DIAGRAM FOR A COLLECTION OF PROBABILITY FORECASTS     #
+### Veronica: Modified to include the hist.counts in the outputs
+#                                                                    #
+######################################################################
+
+ReliabilityDiagramHist<-
+  function(probs, obs, bins=10, nboot=500,
+           plot=FALSE, plot.refin=TRUE, mc.cores=1,
+           cons.probs=c(0.025, 0.975))
+  {
+    print("RD")  # leftover debug trace; it is commented out in the Rfunctions.R copy below
+    #
+    # Plot reliability diagram for a probability forecast
+    #
+    # Usage: ReliabilityDiagram(probs, obs, nbins, nboot)
+    #
+    # Arguments:
+    #
+    # probs ... vector of length N, probs[k] has the predicted probability for
+    #           the event obs[k]
+    # obs   ... obs[k] = 1 if the event happened at instance k, obs[k] = 0
+    #           otherwise
+    # bins  ... either scalar: number of equidistant bins to discretize the
+    #           forecast probabilities,
+    #           or a vector: user-defined breakpoints of the bins; the `hist`
+    #           function will produce errors if these are not valid
+    # nboot ...
number of bootstrap resamples for estimating consistency bars + # if nboot==0, no resampling is done and NAs are returned as + # consistency bars + # plot ... boolean; whether to plot the reliability diagram + # plot.refin ... boolean; whether to plot the small refinement histogram + # in lower right corner + # cons.probs ... a 2-vector, lower and upper confidence limit + # mc.cores ... number of cores for resampling (if > 1, library `multicore` + # is required) + # + # Return value: + # + # a data frame of K+1 rows with the following columns: + # + # * p.avgs ... in-bin averages of the forecast probabilities + # * cond.probs ... observed conditional frequency of event, given i + # * cbar.lo ... lower limit consistency of consistency bar[i], as specified by user + # * cbar.hi ... upper limit consistency of consistency bar[i], as specified by user + # + # Author: + # + # Stefan Siegert + # s.siegert@exeter.ac.uk + # December 2013 + # + # Example: + # + # N <- 1000 + # p <- rbeta(N, 1, 3) + # y <- rbinom(N, 1, p) + # rd <- rel.diag(p, y, plot=TRUE) + # print(rd) + # + # + # change log: + # + # 2013/12/02 + # * manual definition of bin-breaks + # * manual definition of consistency intervals + # * sanity checks + # * multicore option for resampling + # + # 2013/10/31: + # * return summary data as data frame + # * added options `plot` and `plot.refin` + # + # 2013/08/20: + # * points are plotted at in-bin-averages, not at bin centres + # * legend has been removed + # * consistency bars have been added, calculated by a resampling technique + # * see Broecker (2007) http://dx.doi.org/10.1175/WAF993.1 for details + # * the bars are pointwise 2.5% ... 97.5% intervals around the hypothesis of reliability + # * dependency on package "verification" was removed + # + # Author: Stefan Siegert + # + # based on previous version by Caio Coelho and the routine + # reliability.plot.default of the R-package `verification` + # + + + # sanity checks + if (class(probs) == "data.frame") { + probs <- c(as.matrix(probs)) + } + if (class(obs) == "data.frame") { + obs <- c(as.matrix(obs)) + } + stopifnot(length(probs) == length(obs)) + stopifnot(nboot >= 0, mc.cores >= 0) + stopifnot(all(probs >= 0), all(probs <= 1), all(obs %in% c(0,1))) + stopifnot(length(cons.probs) == 2, all(cons.probs >= 0), all(cons.probs <= 1)) + # optional use of multicore without warning message + warn <- getOption("warn") + options(warn=-1) + if(require(multicore, quietly=TRUE)) { + mclapply <- multicore::mclapply + } else { + mclapply <- function(..., mc.cores) lapply(...) 
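+      # (the wrapper above is a serial fallback: when the long-deprecated
+      #  'multicore' package is unavailable, lapply() is used instead, and the
+      #  mc.cores argument is accepted but ignored)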
+ } + options(warn=warn) + + # some definitions and corrections + n <- length(obs) + mc.cores <- floor(mc.cores) + nboot <- floor(nboot) + cons.probs <- sort(cons.probs) + + + ############################################# + # reliability analysis + ############################################# + # estimate refinement function + if (length(bins) == 1) { + nbins <- floor(bins) + brx <- seq(0, 1, length.out=nbins+1) + + c(-.1, rep(0, nbins-1), .1) + } else { + nbins <- length(bins) - 1 + bins <- sort(bins) + stopifnot(min(bins)<= 0 & max(bins) >= 1) + brx <- bins + } + h <- hist(probs, breaks=brx, plot=FALSE)$counts +#print(h) +#print(sum(h)) +#print(probs) +#print(sum(probs)) + p <- sum(probs) +print(sum(probs)/sum(h)) + + # estimate calibration function + g <- hist(probs[obs==1], breaks=brx, plot=FALSE)$counts +#print(g) +#print(sum(g)) + obar.i <- g / h +#print(obar.i) + no_res <- sum(g)/sum(h) +print(no_res) + obar.i[ is.nan(obar.i) ] <- NA + + # calculate in-bin averages + p.bins <- as.numeric(cut(probs, breaks=brx, include.lowest=TRUE)) + p.avgs <- sapply(seq(nbins), + function(ii) mean(probs[p.bins == ii], na.rm=TRUE)) + p.avgs[ is.nan(p.avgs) ] <- NA + +# +#print(p.avgs) +# vertline <- sum(p.avgs,na.rm = TRUE)/bins +#print(vertline) + + ############################################# + # consistency resampling (broecker and smith 2007) + ############################################# + if (nboot) { + resamp.mat <- matrix(nrow=0, ncol=nbins) + # the resampling function + sample.rel.diag <- function(dummy=0) { + p.hat <- sample(x=probs, size=n, replace=TRUE) + x.hat <- rbinom(n=n, size=1, prob=p.hat) + hh <- hist(p.hat, breaks=brx, plot=FALSE)$counts + gg <- hist(p.hat[x.hat==1], breaks=brx, plot=FALSE)$counts + return(gg / hh) + } + # multicore? + if (mc.cores > 1) { + l <- mclapply(1:nboot, sample.rel.diag, mc.cores=mc.cores) + resamp.mat <- do.call(rbind, l) + } else { + l <- replicate(nboot, sample.rel.diag()) + resamp.mat <- t(l) + } + cons.bars <- apply(resamp.mat, 2, + function(z) quantile(z, cons.probs, na.rm=TRUE)) + } else { + cons.bars <- matrix(NA, ncol=nbins, nrow=2) + } + + + ############################################# + # plot the reliability diagram + ############################################# + if (plot) { + # reliability plot + old.par <- par(no.readonly = TRUE) + on.exit(par(old.par)) + plot(NULL, xlim = c(0,1), ylim = c(0,1), + xlab= "Forecast probability", + ylab="Observed relative frequency") + # consistency bars + for (i in 1:length(p.avgs)) { + lines(rep(p.avgs[i], 2), cons.bars[, i], col="#CCCCCC", lwd=6) + } + # reliability points and diagonal + points(p.avgs, obar.i, col = "black", pch = 1, lwd=2, type="b") + lines(c(0,1), c(0,1), lty=1) + if (plot.refin) { + # refinement histogram in lower corner + pp<- par("plt") + par("plt" = c(pp[2] - 0.2 , pp[2], pp[3], pp[3]+ 0.2) ) + par(new = TRUE) + barplot(h, axes = FALSE, axisnames = FALSE) + axis(4) + box() + } + } + + ############################################# + # return data + ############################################# + ret.df <- data.frame(p.avgs=p.avgs, cond.probs=obar.i, + cbar.lo=cons.bars[1,], cbar.hi=cons.bars[2,],hist.counts=h, obs.counts=g, for.prob=p) + return(ret.df) + } diff --git a/Rfunctions.R b/Rfunctions.R new file mode 100644 index 0000000000000000000000000000000000000000..a7c71af6c646f26eef0bdc855522f6bbddb20aac --- /dev/null +++ b/Rfunctions.R @@ -0,0 +1,4078 @@ +# Things you might want to change + +# options(papersize="a4") +# options(editor="notepad") +# options(pager="internal") + +# 
+# set the default help type:
+# options(help_type="text")
+options(help_type="html")
+
+# your list with all the R packages you want to load in memory at the beginning of every R session:
+my.packages.ic3<-c("sp","s2dverification","ncdf","ncdf4","statmod","maps","mapdata","parallel",
+                   "doMC","RColorBrewer","MASS","maptools","scales","abind","reshape","reshape2","data.table","GEOmap")
+
+# "psych","plotrix"
+
+# GEOmap: for the dataset coastmap needed by PlotEquiDist
+
+############################ Script loaded automatically at startup #####################
+
+.First<-function(){
+  # load all my packages in memory at the beginning of the R session:
+  options(defaultPackages=c(getOption("defaultPackages"),my.packages.ic3))
+  cat("\nAll the optional libraries have been loaded\n")
+  print(paste("Current R session loaded from: ",commandArgs()))  # print the name of the .RData file just opened
+
+  print(paste("Working dir is: ",getwd()))
+  print(system("free -m"))
+  print(gc())
+}
+
+################################### My R Aliases ############################################
+# aliases for commonly used functions:
+
+s <- function(x) str(x)  # (the original took no argument, so 'x' inside str() was undefined)
+l <- function(x) length(x)  # <- to quickly get the length of a vector
+qu <- function() quit("no")  # <- to exit quickly from R, typing only qu() instead of q("no")
+clear <- function() rm(list=ls(envir=.GlobalEnv), envir=.GlobalEnv)  # remove all objects in the R session (ls()/rm() must target .GlobalEnv from inside a function)
+na <- function(x) length(which(is.na(x)))  # find whether there are NA values in the object and how many they are
+nna <- function(x) length(which(!is.na(x)))  # find whether there are values that are NOT NA in the object and how many they are
+de <- function() dev.off()  # to close a graphic window quickly
+pl <- function() plot.new()  # to open an empty plot quickly
+w <- function() windows()  # <- to open a new window (only works on Windows; on Linux the command is x11())
+
+################################### my Color Palettes #############################################
+
+library("RColorBrewer")
+
+my.palette1<-c("lightblue","white",brewer.pal(9, "YlOrRd"))  #rev(brewer.pal(11, "RdYlBu"))  # it has only 11 colors!
+my.palette2<-c("lightblue","white",brewer.pal(9, "YlOrRd")) #rev(brewer.pal(11, "RdYlBu")) +my.palette3<-brewer.pal(9, "Greens") # only 9 colors +my.palette4<-rev(brewer.pal(9, "BuPu")) +my.palette5<-brewer.pal(9, "Blues") +my.palette6<-brewer.pal(9, "Blues") +my.palette<-list(my.palette1,my.palette2,my.palette3,my.palette4,my.palette5,my.palette6) + +# Escala de colores para los mapas de tendencias +my.palette1.trend<-brewer.pal(9,"YlOrBr") +my.palette2.trend<-brewer.pal(9,"YlOrBr") +my.palette3.trend<-brewer.pal(9,"Greens") # Winkler's Index +my.palette4.trend<-brewer.pal(9,"YlOrBr") # rev(brewer.pal(11,"RdBu")) +my.palette5.trend<-rev(brewer.pal(11,"RdBu")) +my.palette6.trend<-rev(brewer.pal(11,"RdBu")) +my.palette.trend<-list(my.palette1.trend,my.palette2.trend,my.palette3.trend,my.palette4.trend,my.palette5.trend,my.palette6.trend) + +# others color palettes: +#my.palette7 <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + + +############################## My (short) Functions ####################################### + +# save in this function all the commands to download all the packages the first time you install R: +download.my.packages <- function(list.packages)for(p in 1:length(list.packages))install.packages(list.packages[p],repos="http://cran.univ-lyon1.fr") + +# Convert degrees to radians: +deg2rad<-function(deg) return(deg*pi/180) + +################################### My Functions ####################################### + +# function to show memory usage: +.ls.objects <- function (pos = 1, pattern, order.by, decreasing=FALSE, head=FALSE, n=5) { + napply <- function(names, fn) sapply(names, function(x) + fn(get(x, pos = pos))) + names <- ls(pos = pos, pattern = pattern) + obj.class <- napply(names, function(x) as.character(class(x))[1]) + obj.mode <- napply(names, mode) + obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class) + obj.size <- napply(names, object.size) + obj.dim <- t(napply(names, function(x) + as.numeric(dim(x))[1:2])) + vec <- is.na(obj.dim)[, 1] & (obj.type != "function") + obj.dim[vec, 1] <- napply(names, length)[vec] + out <- data.frame(obj.type, obj.size, obj.dim) + names(out) <- c("Type", "Size", "Rows", "Columns") + if (!missing(order.by)) + out <- out[order(out[[order.by]], decreasing=decreasing), ] + if (head) + out <- head(out, n) + return(out) +} + +# shorthand +lsos <- function(..., n=10) { + .ls.objects(..., order.by="Size", decreasing=TRUE, head=TRUE, n=n) + +} + +# create function to return matrix of memory consumption: +object.sizes <- function() +{ + return(rev(sort(sapply(ls(envir=.GlobalEnv), function (object.name) + object.size(get(object.name)))))) +} + +# function to resume memory use on Linux: +mem<-function(){ + print(system("free -m")) + #print(gc()) + print(lsos()) +} + +################################################################################################# +# Calendar functions # +################################################################################################# + +# vector with the month names: +my.month <- c("January","February","March","April","May","June","July","August","September","October","November","December") +my.month.short <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec") +my.month.short2 <- c("Ja", "Fe", "Ma", "Ap", "Ma", "Ju", "Ju", "Au", "Se", "Oc", "No", "De") +my.month.short3 <- my.month.very.short <- c("J","F","M","A","M","J","J","A","S","O","N","D") +my.season <- 
c("Winter", "Spring", "Summer", "Autumn")
+my.period <- period.name <- c(my.month, my.season, "Yearly")
+
+# returns TRUE if 'day' is the last day of 'month' in 'year', FALSE otherwise:
+endmonth<-function(day,month,year){
+  last=FALSE
+  if(month==1 & day==31) last=TRUE;  if(month==3 & day==31) last=TRUE;  if(month==4 & day==30) last=TRUE;  if(month==5 & day==31) last=TRUE
+  if(month==6 & day==30) last=TRUE;  if(month==7 & day==31) last=TRUE;  if(month==8 & day==31) last=TRUE;  if(month==9 & day==30) last=TRUE
+  if(month==10 & day==31) last=TRUE; if(month==11 & day==30) last=TRUE; if(month==12 & day==31) last=TRUE
+  if(year%%400==0 | (year%%4==0 & year%%100!=0)){  # in this case the year is a leap year
+    if(month==2 & day==29) last=TRUE } else {      # NOT a leap year:
+    if(month==2 & day==28) last=TRUE }
+  return(last)
+}
+
+# similar to the previous one: returns the last day of the input month (i.e. the number of days of that month):
+lastday<-function(month,year){
+  if(month==1 | month==3 | month==5 | month==7 | month==8 | month==10 | month==12) last=31
+  if(month==4 | month==6 | month==9 | month==11) last=30
+  if(month==2){
+    if(year%%400==0 | (year%%4==0 & year%%100!=0)) {last=29} else {last=28}
+  }
+  #if(year==1 & month==2) last=28.25  # if 1 is given as the year, it returns the mean number of days of February over many years (28.25); useful to compute the WT frequencies of February
+  return(last)
+}
+
+# returns TRUE if the input year is a leap year:
+leap.year <- function(year) return(ifelse((year%%4==0 & year%%100!=0) | year%%400==0, TRUE, FALSE))
+
+# return the number of days of the input year:
+n.days.in.a.year <- function(year) return(ifelse((year%%4==0 & year%%100!=0) | year%%400==0, 366, 365))
+
+# return the number of days of the input month (1=Jan, 12=Dec):
+# actually, it is only a wrapper of lastday(), kept for consistency
+n.days.in.a.month <- function(month,year){
+  return(lastday(month, year))
+}
+
+# like the previous function, but if the input month is greater than 12, it moves into the next year:
+ndm <- function(month,year){
+  if(month < 13){
+    return(lastday(month, year))
+  } else {
+    return(lastday(month-12, year+1))
+  }
+}
+
+# return the number of days of the input season (1=Winter, 2=Spring, 3=Summer, 4=Autumn):
+n.days.in.a.season <- function(season,year){
+  return(length(pos.season(year,season)))
+}
+
+# return the number of days of the input period (1-12: Jan-Dec, 13: winter, 14: spring, 15: summer, 16: autumn, 17: year):
+n.days.in.a.period <- function(period,year){
+  if(period <= 12) return(n.days.in.a.month(period,year))
+  if(period > 12 && period < 17) return(n.days.in.a.season(period-12,year))
+  if(period == 17) return(n.days.in.a.year(year))
+}
+
+# get the total number of days from year.start to year.end:
+n.days.in.a.yearly.period <- function(year.start, year.end){
+  days.tot <- 0
+  for(y in year.start:year.end){
+    days.tot <- days.tot + n.days.in.a.year(y)
+  }
+  return(days.tot)
+}
+
+# get the total number of days from month.start to month.end (included).
+# if month.end is smaller than month.start, it considers month.end to be a month of the following year: +n.days.in.a.monthly.period <- function(month.start, month.end, year){ + days.tot <- 0 + if(month.start <= month.end){ + for(m in month.start:month.end) days.tot <- days.tot + n.days.in.a.month(m, year) + } else { + for(m in month.start:12) days.tot <- days.tot + n.days.in.a.month(m, year) + for(m in 1:month.end) days.tot <- days.tot + n.days.in.a.month(m, year+1) + } + return(days.tot) +} + +seq.months.in.a.year<-function(year){ # restituisce una sequenza di 365 o 366 numeri, il cui valore rappres.il numero del mese dell'anno associato a quel giorno + n.days.febr<-ifelse(n.days.in.a.year(year)==366,29,28) + return(c(rep(1,31),rep(2,n.days.febr),rep(3,31),rep(4,30),rep(5,31),rep(6,30),rep(7,31),rep(8,31),rep(9,30),rep(10,31),rep(11,30),rep(12,31))) +} + +seq.days.in.a.year<-function(year){ # restituisce una sequenza di 365 o 366 numeri, il cui valore rappres.il numero del giorno dell'anno associato + n.days.febr<-ifelse(n.days.in.a.year(year)==366,29,28) + return(c(1:31, 1:n.days.febr, 1:31, 1:30, 1:31, 1:30, 1:31, 1:31, 1:30, 1:31, 1:30, 1:31)) +} + + # interval of days belonging only to the year y, but starting to count from the year year.start: +seq.days.in.a.future.year <- function(year.start, y){ + return(n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(y,y) + 1:n.days.in.a.year(y)) +} + +# number of days from year.start to year y, excluding year y: +n.days.in.a.future.year <- function(year.start, y){ + return(n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(y,y)) +} + + +# vector with the number of the days of the input year belonging to the input month (1=January, 12=December): +pos.month <- function(year,month){ + if(month==1) return(1:31) + n.feb <- 28 + ifelse(leap.year(year)==TRUE,1,0) + + if(month == 2) return(31 + 1:n.feb) + if(month == 3) return(31 + n.feb + 1:31) + if(month == 4) return(31 + n.feb + 31 + 1:30) + if(month == 5) return(31 + n.feb + 31 + 30 + 1:31) + if(month == 6) return(31 + n.feb + 31 + 30 + 31 + 1:30) + if(month == 7) return(31 + n.feb + 31 + 30 + 31 + 30 + 1:31) + if(month == 8) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 1:31) + if(month == 9) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 1:30) + if(month == 10) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 1:31) + if(month == 11) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 1:30) + if(month == 12) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 1:31) +} + +# as pos.month, but it returns a vector with the days of the imput year belonging to the months before the input month: +pos.months.before <- function(year,month){ + pos <- c() + if(month == 1) { + pos <- 0 + } else { + for(m in 1:(month-1)){ + pos <- c(pos, pos.month(year, m)) + } + } + + return(pos) +} + +# as pos.month, but also adds the days of the two closer months to the input month: +pos.month.extended <- function(year,month){ + if(month == 1) return(c(pos.month(year,12),pos.month(year,1),pos.month(year,2))) + if(month == 2) return(c(pos.month(year,1),pos.month(year,2),pos.month(year,3))) + if(month == 3) return(c(pos.month(year,2),pos.month(year,3),pos.month(year,4))) + if(month == 4) return(c(pos.month(year,3),pos.month(year,4),pos.month(year,5))) + if(month == 5) return(c(pos.month(year,4),pos.month(year,5),pos.month(year,6))) + if(month == 6) return(c(pos.month(year,5),pos.month(year,6),pos.month(year,7))) + if(month == 7) 
return(c(pos.month(year,6),pos.month(year,7),pos.month(year,8))) + if(month == 8) return(c(pos.month(year,7),pos.month(year,8),pos.month(year,9))) + if(month == 9) return(c(pos.month(year,8),pos.month(year,9),pos.month(year,10))) + if(month == 10) return(c(pos.month(year,9),pos.month(year,10),pos.month(year,11))) + if(month == 11) return(c(pos.month(year,10),pos.month(year,11),pos.month(year,12))) + if(month == 12) return(c(pos.month(year,11),pos.month(year,12),pos.month(year,1))) +} + +# as pos.month.extended, but only adds 15 days of the two closer months to the input month: +pos.month.extended15 <- function(year,month){ + if(month == 1) return(c(pos.month(year,12)[(l(pos.month(year,12))-14):l(pos.month(year,12))],pos.month(year,1),pos.month(year,2)[1:15])) + if(month == 2) return(c(pos.month(year,1)[(l(pos.month(year,1))-14):l(pos.month(year,1))],pos.month(year,2),pos.month(year,3)[1:15])) + if(month == 3) return(c(pos.month(year,2)[(l(pos.month(year,2))-14):l(pos.month(year,2))],pos.month(year,3),pos.month(year,4)[1:15])) + if(month == 4) return(c(pos.month(year,3)[(l(pos.month(year,3))-14):l(pos.month(year,3))],pos.month(year,4),pos.month(year,5)[1:15])) + if(month == 5) return(c(pos.month(year,4)[(l(pos.month(year,4))-14):l(pos.month(year,4))],pos.month(year,5),pos.month(year,6)[1:15])) + if(month == 6) return(c(pos.month(year,5)[(l(pos.month(year,5))-14):l(pos.month(year,5))],pos.month(year,6),pos.month(year,7)[1:15])) + if(month == 7) return(c(pos.month(year,6)[(l(pos.month(year,6))-14):l(pos.month(year,6))],pos.month(year,7),pos.month(year,8)[1:15])) + if(month == 8) return(c(pos.month(year,7)[(l(pos.month(year,7))-14):l(pos.month(year,7))],pos.month(year,8),pos.month(year,9)[1:15])) + if(month == 9) return(c(pos.month(year,8)[(l(pos.month(year,8))-14):l(pos.month(year,8))],pos.month(year,9),pos.month(year,10)[1:15])) + if(month == 10) return(c(pos.month(year,9)[(l(pos.month(year,9))-14):l(pos.month(year,9))],pos.month(year,10),pos.month(year,11)[1:15])) + if(month == 11) return(c(pos.month(year,10)[(l(pos.month(year,10))-14):l(pos.month(year,10))],pos.month(year,11),pos.month(year,12)[1:15])) + if(month == 12) return(c(pos.month(year,11)[(l(pos.month(year,11))-14):l(pos.month(year,11))],pos.month(year,12),pos.month(year,1)[1:15])) + +} + +# vector with the number of the days of the input year belonging to the input month (1=Winter, 4=Autumn): (Winter is Jan-Feb and Dec of the same year) +pos.season <- function(year,season){ + if(season==1) return(c(pos.month(year,1),pos.month(year,2),pos.month(year,12))) # winter + if(season==2) return(c(pos.month(year,3),pos.month(year,4),pos.month(year,5))) # spring + if(season==3) return(c(pos.month(year,6),pos.month(year,7),pos.month(year,8))) # summer + if(season==4) return(c(pos.month(year,9),pos.month(year,10),pos.month(year,11))) # autumn +} + +# same as pos.month, but for period > 12 returns the seasonal positions instead (13: winter, 14. 
spring, 15:summer, 16:autumn), or the yearly interval for period = 17 +pos.period <- function(year,period){ + if(period <= 12) return(pos.month(year, period)) + if(period > 12 && period < 17) return(pos.season(year, period-12)) + if(period == 17) return(1:n.days.in.a.year(year)) +} + + + +# sequence of weekly startdate for the chosen year and start day/month: +weekly.seq <- function(year,month,day){ + yr1 <- year # starting year of the weekly sequence + #yr2 <- year # in future you can create a sequence for more than one year + mes <- month # starting month (usually january) + #day<-2 # starting day + + if(mes<10) {mes0 <- paste0(0,mes)} else {mes0 <- mes} + if(day<10) {day0 <- paste0(0,day)} else {day0 <- day} + sdates <- paste0(yr1,mes0,day0) + nday <- day + ndaysFebruary <- lastday(2,yr1) + ndays4month <- c(31,ndaysFebruary,31,30,31,30,31,31,30,31,30,31) + + while (nday < 365-7) { # ojo a los bisiestos! + day <- day+7 + nday <- nday+7 + if(day > ndays4month[mes]){ + day <- day-ndays4month[mes] + mes=mes+1 + } + if(mes < 10){ mes0 <- paste0(0,mes)} else {mes0 <- mes} + if(day < 10){ day0 <- paste0(0,day)} else {day0 <- day} + sdates <- c(sdates,paste0(yr1,mes0,day0)) + } + return(sdates) +} + +# return the position inside the weekly.seq of all the startdates whose months belongs to the chosen period: +months.period <- function(year,mes,day,period){ + sdates.seq <- weekly.seq(year,mes,day) + months.period <- list() + + for(p in 1:12) months.period[[p]] <- which(as.numeric(substr(sdates.seq,5,6)) == p) + + months.period[[13]] <- c(months.period[[1]],months.period[[2]],months.period[[12]]) + months.period[[14]] <- c(months.period[[3]],months.period[[4]],months.period[[5]]) + months.period[[15]] <- c(months.period[[6]],months.period[[7]],months.period[[8]]) + months.period[[16]] <- c(months.period[[9]],months.period[[10]],months.period[[11]]) + + return(months.period[[period]]) +} + +################################################################################################# +# Graphic functions # +################################################################################################# + +# Like PlotEquiMap, but: +# - with the option to specify with 'contours.col' colours of the contour lines, +# - with the option to specify with 'cex.axis' the size of the lat/lon tick numbers, +# - with the option to specify with 'xlabel.dist' the distance of the x labels from the x axis +# - with the option to specify with 'contours.lty=' to use a different line type for negative contour values! 
+# - with the option 'contours.labels' not to draw the contour labels +# - with the option 'continents.col', the colors of the line of the continents, if filled.continents=FALSE (by default it is gray) +# +# PlotEquiMap(array(0,c(160,160)),1:160,-80:79) +# + +PlotEquiMap2<-function (var, lon, lat, toptitle = "", sizetit = 1, units = "", + brks = NULL, cols = NULL, square = TRUE, filled.continents = TRUE, + contours = NULL, brks2 = NULL, dots = NULL, axelab = TRUE, + labW = FALSE, intylat = 20, intxlon = 20, drawleg = TRUE, + subsampleg = 1, numbfig = 1, colNA = "white", contours.col = par("fg"), col_border = gray(0.5), + contours.lty = 1, contours.labels=TRUE, cex.lab = NULL, xlabel.dist = 1, continents.col = gray(0.5)) +{ + data(coastmap, envir = environment()) + dims <- dim(var) + if (length(dims) > 2) { + stop("Only 2 dimensions expected for var : (lon,lat) ") + } + if (dims[1] != length(lon) | dims[2] != length(lat)) { + if (dims[1] == length(lat) & dims[2] == length(lon)) { + var <- t(var) + dims <- dim(var) + } + else { + stop("Inconsistent var dimensions / longitudes + latitudes") + } + } + latb <- sort(lat, index.return = TRUE) + dlon <- lon[2:dims[1]] - lon[1:(dims[1] - 1)] + wher <- which(dlon > (mean(dlon) + 1)) + if (length(wher) > 0) { + lon[(wher + 1):dims[1]] <- lon[(wher + 1):dims[1]] - 360 + } + lonb <- sort(lon, index.return = TRUE) + latmin <- floor(min(lat)/10) * 10 + latmax <- ceiling(max(lat)/10) * 10 + lonmin <- floor(min(lon)/10) * 10 + lonmax <- ceiling(max(lon)/10) * 10 + colorbar <- colorRampPalette(c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen", "white", "white", "yellow", "orange", "red", "saddlebrown")) + if (is.null(brks) == TRUE) { + ll <- signif(min(var, na.rm = TRUE), 4) + ul <- signif(max(var, na.rm = TRUE), 4) + if (is.null(cols) == TRUE) { + cols <- colorbar(10) + } + nlev <- length(cols) + brks <- signif(seq(ll, ul, (ul - ll)/nlev), 4) + } + else { + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- colorbar(nlev) + } + else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + } + if (is.null(brks2) == TRUE) { + if (is.null(contours)) { + if (square == FALSE) { + brks2 <- brks + contours <- var + } + } + else { + ll <- signif(min(contours, na.rm = TRUE), 2) + ul <- signif(max(contours, na.rm = TRUE), 2) + brks2 <- signif(seq(ll, ul, (ul - ll)/(length(brks) - + 1)), 2) + } + } + if (axelab == TRUE) { + ypos <- seq(latmin, latmax, intylat) + xpos <- seq(lonmin, lonmax, intxlon) + letters <- array("", length(ypos)) + letters[ypos < 0] <- "S" + letters[ypos > 0] <- "N" + ylabs <- paste(as.character(abs(ypos)), letters, sep = "") + letters <- array("", length(xpos)) + if (labW) { + nlon <- length(xpos) + xpos2 <- xpos + xpos2[xpos2 > 180] <- 360 - xpos2[xpos2 > 180] + } + letters[xpos < 0] <- "W" + letters[xpos > 0] <- "E" + if (labW) { + letters[xpos == 0] <- " " + letters[xpos == 180] <- " " + letters[xpos > 180] <- "W" + xlabs <- paste(as.character(abs(xpos2)), letters, sep = "") + } + else { + xlabs <- paste(as.character(abs(xpos)), letters, sep = "") + } + xmargin <- 1.2 - (numbfig^0.2) * 0.05 + ymargin <- 3 - (numbfig^0.2) + spaceticklab <- 1.3 - (numbfig^0.2) * 0.8 + topmargin <- 0.4 + ymargin2 <- 1.5 - (numbfig^0.2) * 0.9 + } + else { + xmargin <- 0.2 + ymargin <- 0.2 + switch(as.character(square), `FALSE` = 1.8, + 0) + topmargin <- 0.2 + spaceticklab <- 1 + ymargin2 <- 0.2 + } + + if (toptitle != "") topmargin <- 2.5 - (numbfig^0.2) * 0.6 + if (min(lon) < 0) { + 
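+    # pick the maps-package database matching the longitude convention:
+    # "world" is centred on 0 (lons -180..180), "world2" on the Pacific (lons 0..360)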
continents <- "world" + } else { + continents <- "world2" + } + + if (square) { + if (drawleg) {layout(matrix(1:2, ncol = 1, nrow = 2), heights = c(5,1))} + + par(mar = c(xmargin, ymargin, topmargin, ymargin2), cex = 1.4,mgp = c(3, spaceticklab, 0), las = 0) + + if (colNA != "white") { + blanks <- array(0, dim = c(length(lonb$x), length(latb$x))) + image(lonb$x, latb$x, blanks, col = c(colNA), breaks = c(-1,1), main = toptitle, cex.main = (1.5/numbfig^(0.2))*sizetit, axes = FALSE, xlab = "", ylab = "") + flagadd <- TRUE + } + else {flagadd <- FALSE} + + image(lonb$x, latb$x, var[lonb$ix, latb$ix], col = cols, breaks = brks, main = toptitle, axes = FALSE, xlab = "", ylab = "", cex.main = (1.5/numbfig^(0.2)) * sizetit, add = flagadd) + + if (axelab == TRUE) { + if(is.null(cex.lab)) {my.cex <- 1/(numbfig^0.3)} else {my.cex <- cex.lab} + axis(2, at = ypos, labels = ylabs, cex.axis = my.cex, tck = -0.01) + axis(1, at = xpos, labels = xlabs, cex.axis = my.cex, tck = -0.01, mgp=c(3,xlabel.dist,0)) + } + + if (is.null(contours) == FALSE) { + if(contours.lty == 1){ + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2, method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = 1, col = contours.col, drawlabels=contours.labels) + } else { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 < 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = contours.lty, col = contours.col, drawlabels=contours.labels) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 == 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 3, lty = 1, col = contours.col, drawlabels=contours.labels) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 > 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = 1, col = contours.col, drawlabels=contours.labels) + } + } + + map(continents, interior = FALSE, add = TRUE, lwd = 1, col=continents.col) + box() + } + else { + par(mar = c(xmargin + 5, ymargin + 1.5, topmargin, ymargin2), + cex.main = (1.6 * numbfig^(0.3)) * sizetit, cex.axis = 1.4, + cex.lab = 1.6, mgp = c(3, spaceticklab + 0.5, 0), + las = 0) + if (axelab == TRUE) { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + axis(2, at = ypos, labels = ylabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + axis(1, at = xpos, labels = xlabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2,col = contours.col) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, col=continents.col) + }, key.title = title(main = units, cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + else { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2, col = contours.col) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, col=continents.col) + }, key.title = title(main = units, 
cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + } + if (is.null(dots) == FALSE) { + for (ix in 1:length(lon)) { + for (jy in 1:length(lat)) { + if (is.na(var[ix, jy]) == FALSE) { + if (dots[ix, jy] == TRUE) { + text(x = lon[ix], y = lat[jy], ".", cex = 12/(sqrt(sqrt(length(var))) * numbfig^0.5)) + } + } + } + } + } + if (square == TRUE & filled.continents == TRUE) { + if (min(lon) >= 0) { + ylat <- latmin:latmax + xlon <- lonmin:lonmax + proj <- setPROJ(1, LON0 = mean(xlon), LAT0 = mean(ylat), + LATS = ylat, LONS = xlon) + coastmap$STROKES$col[which(coastmap$STROKES$col == "blue")] <- "white" + par(new = TRUE) + plotGEOmap(coastmap, PROJ = proj, border = "black", add = TRUE) + box() + } + else { + map(continents, interior = FALSE, wrap = TRUE, lwd = 0.7, col = gray(0.5), fill = TRUE, add = TRUE, border = col_border) + } + } + if (square & drawleg) { + par(mar = c(1.5, ymargin + 1.5, 2.5, ymargin2), mgp = c(1.5, 0.3, 0), las = 1, cex = 1.2) + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols, xlab = "", ylab = "", main = units, cex.main = 1.1) + box() + axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg), labels = brks[seq(1, length(brks), subsampleg)]) + } +} + + +# Draw points over a PlotEquiMap: +map.points<-function (x, country = "", label = NULL, minpop = 0, + maxpop = Inf, capitals = 0, cex = par("cex"), projection = FALSE, + parameters = NULL, orientation = NULL, pch = 1, ...) +{ + + usr <- par("usr") + if (!missing(projection) && projection != FALSE) { + if (require(mapproj)) { + if (is.character(projection)) { + projx <- mapproject(x$long, x$lat, projection = projection, + parameters = parameters, orientation = orientation) + } + else { + if (nchar(.Last.projection()$projection) > 0) { + projx <- mapproject(x$long, x$lat) + } + else stop("No projection defined\n") + } + x$long <- projx$x + x$lat <- projx$y + } + else stop("mapproj package not available\n") + } + else { + if (usr[2] > (180 + 0.04 * (usr[2] - usr[1]))) + x$long[x$long < 0] <- 360 + x$long[x$long < 0] + } + selection <- x$long >= usr[1] & x$long <= usr[2] & x$lat >= + usr[3] & x$lat <= usr[4] & (x$pop >= minpop & x$pop <= + maxpop) & ((capitals == 0) | (x$capital >= 1)) + if (is.null(label)) + label <- sum(selection) < 20 + cxy <- par("cxy") + if (sum(selection01) > 0) + points(x$long[selection01], x$lat[selection01], pch = pch, + cex = cex * 0.6, ...) + if (sum(selection0) > 0) + if (label) + text(x$long[selection0], x$lat[selection0] + cxy[2] * + cex * 0.7, paste(" ", x$name[selection0], sep = ""), + cex = cex * 0.7, ...) + if (sum(selection1) > 0) { + points(x$long[selection1], x$lat[selection1], pch = pch, + cex = cex, ...) + text(x$long[selection1], x$lat[selection1] + cxy[2] * + cex, paste(" ", x$name[selection1], sep = ""), cex = cex * + 1.2, ...) + } + if (sum(selection2) > 0) { + points(x$long[selection2], x$lat[selection2], pch = pch, + cex = cex, ...) + text(x$long[selection2], x$lat[selection2] + cxy[2] * + cex * 1.1, paste(" ", x$name[selection2], sep = ""), + cex = cex * 1.1, ...) + } + if (sum(selection3) > 0) { + points(x$long[selection3], x$lat[selection3], pch = pch, + cex = cex, ...) + text(x$long[selection3], x$lat[selection3] + cxy[2] * + cex * 0.9, paste(" ", x$name[selection3], sep = ""), + cex = cex * 0.9, ...) 
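+  # NOTE: selection0, selection1, selection2, selection3 and selection01 are used
+  # above but never defined in this copy (only 'selection' is built), so as written
+  # these branches will fail; they were presumably meant to split 'selection' by
+  # the 'capital' categories, as in maps::map.cities.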
+ } + invisible() +} + +# Taylor diagram (modified from function taylor.diagram of package Plotrix to have the same colors of Nube's taylor diagram) +# you can also specify a text label for each point and can put the color of the point proportional to its bias +# (still missing: bias legend) +# gamma si riferisce alle curve del RMSE! +my.taylor<-function (ref, model, add = FALSE, col = "red", pch = 19, pos.cor = TRUE, + xlab = "", ylab = "", main = "Taylor Diagram", show.gamma = TRUE, + ngamma = 3, gamma.col = "darkgreen", sd.arcs = 0, ref.sd = FALSE, sd.method = "sample", + grad.corr.lines = c(0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99), pcex = 1, cex.axis = 1, + normalize = FALSE, mar = c(5, 4, 6, 6), BIAS = FALSE, my.text = NULL, text.cex = pcex, RMSE.label = FALSE, ...) +{ + grad.corr.full <- c(0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99, 1) + R <- cor(ref, model, use = "na.or.complete") + + if(BIAS==TRUE){ + BIAS <- mean(model-ref,na.rm=TRUE) + my.equidist<-c(-75,-35,-15,-5,5,15,35,75) + my.colors<-c("magenta4","blue4","steelblue","skyblue2","orange","orangered","red","red4") + #my.colorscale<-rev(my.colors(8)) + my.col<-my.colors[which.min(abs(my.equidist-BIAS))] + #my.labels<-c("0-30%","41-50","41-50","51-70%","61-70%","71-80%","81-90%","91-100%") + #my.cuts<-c(-100,-50,-20,-10,0,10,20,50,100) + } + + if (is.list(ref)) + ref <- unlist(ref) + if (is.list(model)) + ref <- unlist(model) + SD <- function(x, subn) { + meanx <- mean(x, na.rm = TRUE) + devx <- x - meanx + ssd <- sqrt(sum(devx * devx, na.rm = TRUE)/(length(x[!is.na(x)]) - + subn)) + return(ssd) + } + subn <- sd.method != "sample" + sd.r <- SD(ref, subn) + sd.f <- SD(model, subn) + if (normalize) { + sd.f <- sd.f/sd.r + sd.r <- 1 + } + maxsd <- 1.5 * max(sd.f, sd.r) + oldpar <- par("mar", "xpd", "xaxs", "yaxs") + if (!add) { + # plot for positive correlations only: + if (pos.cor) { + if (nchar(ylab) == 0) + ylab = "Standard deviation" + par(mar = mar) + plot(0, xlim = c(0, maxsd), ylim = c(0, maxsd), xaxs = "i", + yaxs = "i", axes = FALSE, main = main, xlab = xlab, + ylab = ylab, type = "n", cex = cex.axis, ...) 
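+      # dotted blue radial guides from the origin, one per requested correlation
+      # value (angle = acos(correlation), length = maxsd):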
+ if (grad.corr.lines[1]) { + for (gcl in grad.corr.lines) lines(c(0, maxsd * + gcl), c(0, maxsd * sqrt(1 - gcl^2)), lty = 3,col="blue") + } + segments(c(0, 0), c(0, 0), c(0, maxsd), c(maxsd, + 0),col="blue") + axis.ticks <- pretty(c(0, maxsd),n=6) + axis.ticks <- axis.ticks[axis.ticks <= maxsd] + axis(1, at = axis.ticks, cex.axis = cex.axis) + axis(2, at = axis.ticks, cex.axis = cex.axis) + if (sd.arcs[1]) { + if (length(sd.arcs) == 1) + sd.arcs <- axis.ticks + for (sdarc in sd.arcs) { + xcurve <- cos(seq(0, pi/2, by = 0.03)) * sdarc + ycurve <- sin(seq(0, pi/2, by = 0.03)) * sdarc + lines(xcurve, ycurve, col = "black", lty = 3) + } + } # if there is more than one curve for the st.dev: + if (show.gamma[1]) { + if (length(show.gamma) > 1) + gamma <- show.gamma + else gamma <- pretty(c(0, maxsd), n = ngamma)[-1] # [-1] for removing the first value of 0.0 + if (gamma[length(gamma)] > maxsd) + gamma <- gamma[-length(gamma)] + labelpos <- seq(45, 70, length.out = length(gamma)) + for (gindex in 1:length(gamma)) { + xcurve <- cos(seq(0, pi, by = 0.03)) * gamma[gindex] + + sd.r + endcurve <- which(xcurve < 0) + endcurve <- ifelse(length(endcurve), min(endcurve) - + 1, 105) + ycurve <- sin(seq(0, pi, by = 0.03)) * gamma[gindex] + maxcurve <- xcurve * xcurve + ycurve * ycurve + startcurve <- which(maxcurve > maxsd * maxsd) + startcurve <- ifelse(length(startcurve), max(startcurve) + + 1, 0) + lines(xcurve[startcurve:endcurve], ycurve[startcurve:endcurve], + col = gamma.col) + if (xcurve[labelpos[gindex]] > 0) + boxed.labels(xcurve[labelpos[gindex]], ycurve[labelpos[gindex]], + gamma[gindex], border = FALSE,cex=1, col=gamma.col) + } + } + xcurve <- cos(seq(0, pi/2, by = 0.01)) * maxsd + ycurve <- sin(seq(0, pi/2, by = 0.01)) * maxsd + lines(xcurve, ycurve) # external semicircle + bigtickangles <- acos(seq(0.1, 0.9, by = 0.1)) + medtickangles <- acos(seq(0.05, 0.95, by = 0.1)) + smltickangles <- acos(seq(0.91, 0.99, by = 0.01)) + segments(cos(bigtickangles) * maxsd, sin(bigtickangles) * + maxsd, cos(bigtickangles) * 0.97 * maxsd, sin(bigtickangles) * + 0.97 * maxsd) # external mayor ticks + par(xpd = TRUE) + if (ref.sd) { + xcurve <- cos(seq(0, pi/2, by = 0.01)) * sd.r + ycurve <- sin(seq(0, pi/2, by = 0.01)) * sd.r + lines(xcurve, ycurve) + } + points(sd.r, 0, cex = pcex) + text(cos(c(bigtickangles, acos(c(0.95, 0.99)))) * + 1.05 * maxsd, sin(c(bigtickangles, acos(c(0.95, + 0.99)))) * 1.05 * maxsd, c(seq(0.1, 0.9, by = 0.1), + 0.95, 0.99), col="blue") # correlation numbers + text(maxsd * 0.8, maxsd * 0.8, "Correlation", srt = 315, col="blue") + segments(cos(medtickangles) * maxsd, sin(medtickangles) * + maxsd, cos(medtickangles) * 0.98 * maxsd, sin(medtickangles) * + 0.98 * maxsd) + segments(cos(smltickangles) * maxsd, sin(smltickangles) * + maxsd, cos(smltickangles) * 0.99 * maxsd, sin(smltickangles) * + 0.99 * maxsd) + } + else { # plot in case correlations can be negative or positive (pos.cor=FALSE): + x <- ref + y <- model + R <- cor(x, y, use = "pairwise.complete.obs") + E <- mean(x, na.rm = TRUE) - mean(y, na.rm = TRUE) + xprime <- x - mean(x, na.rm = TRUE) + yprime <- y - mean(y, na.rm = TRUE) + sumofsquares <- (xprime - yprime)^2 + Eprime <- sqrt(sum(sumofsquares)/length(complete.cases(x))) + E2 <- E^2 + Eprime^2 + if (add == FALSE) { + maxray <- 1.5 * max(sd.f, sd.r) + plot(c(-maxray, maxray), c(0, maxray), type = "n", + asp = 1, bty = "n", xaxt = "n", yaxt = "n", + xlab = xlab, ylab = ylab, main = main, cex = cex.axis) + discrete <- seq(180, 0, by = -1) + listepoints <- NULL + for (i in 
discrete) { + listepoints <- cbind(listepoints, maxray * + cos(i * pi/180), maxray * sin(i * pi/180)) + } + listepoints <- matrix(listepoints, 2, length(listepoints)/2) + listepoints <- t(listepoints) + lines(listepoints[, 1], listepoints[, 2]) + lines(c(-maxray, maxray), c(0, 0)) + lines(c(0, 0), c(0, maxray)) + for (i in grad.corr.lines) { + lines(c(0, maxray * i), c(0, maxray * sqrt(1 - + i^2)), lty = 3, col="blue") + lines(c(0, -maxray * i), c(0, maxray * sqrt(1 - + i^2)), lty = 3, col="blue") + } + for (i in grad.corr.full) { + text(1.05 * maxray * i, 1.05 * maxray * sqrt(1 - + i^2), i, cex = cex.axis) + text(-1.05 * maxray * i, 1.05 * maxray * sqrt(1 - + i^2), -i, cex = cex.axis) + } + seq.sd <- seq.int(0, 2 * maxray, by = (maxray/10))[-1] + for (i in seq.sd) { + xcircle <- sd.r + (cos(discrete * pi/180) * + i) + ycircle <- sin(discrete * pi/180) * i + for (j in 1:length(xcircle)) { + if ((xcircle[j]^2 + ycircle[j]^2) < (maxray^2)) { + points(xcircle[j], ycircle[j], col = "darkgreen", + pch = ".") + if (j == 10) + text(xcircle[j], ycircle[j], signif(i, + 2), cex = cex.axis, col = "darkgreen") + } + } + } + seq.sd <- seq.int(0, maxray, length.out = 5) + for (i in seq.sd) { + xcircle <- (cos(discrete * pi/180) * i) + ycircle <- sin(discrete * pi/180) * i + if (i) + lines(xcircle, ycircle, lty = 3, col = "blue") + text(min(xcircle), -0.03 * maxray, signif(i, + 2), cex = cex.axis, col = "blue") + text(max(xcircle), -0.03 * maxray, signif(i, + 2), cex = cex.axis, col = "blue") + } + text(0, -0.08 * maxray, "Standard Deviation", + cex = cex.axis, col = "blue") + text(0, -0.12 * maxray, "Centered RMSE", + cex = cex.axis, col = "darkgreen") + points(sd.r, 0, pch = 22, bg = "darkgreen", cex = 1.1) + text(0, 1.1 * maxray, "Correlation Coefficient", + cex = cex.axis) + } + S <- (2 * (1 + R))/(sd.f + (1/sd.f))^2 + } # close if on 'pos.cor' + } + + if(BIAS==TRUE){ + points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = my.col, cex = pcex) + } else { + points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = col, cex = pcex) + } + + # Label line; You can change the pos argument to your liking: + if(length(text)>0) text(sd.f * R, sd.f * sin(acos(R)), labels=my.text, cex = text.cex, pos=3) + if(RMSE.label==TRUE) text(0.81, 0.14, "RMSE", srt = 45, cex=1, col=gamma.col) + + text(1, 0.04, "ERA-Interim", srt = 0, cex=1, col="darkgray") + + invisible(oldpar) +} + + +north.arrow <- function(loc,size,bearing=0,cols,cex=1,...) 
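+# draws a 16-point compass rose at position 'loc' with radius 'size';
+# 'bearing' rotates it (in radians) and 'cols' alternates the sector colours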
{ + # checking arguments + if(missing(loc)) stop("loc is missing") + if(missing(size)) stop("size is missing") + # default colors are white and black + if(missing(cols)) cols <- rep(c("white","black"),8) + # calculating coordinates of polygons + radii <- rep(size/c(1,4,2,4),4) + x <- radii[(0:15)+1]*cos((0:15)*pi/8+bearing)+loc[1] + y <- radii[(0:15)+1]*sin((0:15)*pi/8+bearing)+loc[2] + # drawing polygons + for (i in 1:15) { + x1 <- c(x[i],x[i+1],loc[1]) + y1 <- c(y[i],y[i+1],loc[2]) + polygon(x1,y1,col=cols[i]) + } + # drawing the last polygon + polygon(c(x[16],x[1],loc[1]),c(y[16],y[1],loc[2]),col=cols[16]) + # drawing letters + b <- c("E","N","W","S") + for (i in 0:3) text((size+par("cxy")[1])*cos(bearing+i*pi/2)+loc[1], + (size+par("cxy")[2])*sin(bearing+i*pi/2)+loc[2],b[i+1], + cex=cex) +} + +# funzione che prende un array e restituisce lo stesso array abbassando pero'tutti gli elementi piu'alti di val.max al valore val.max (utile per aggiustare le leggende dei grafici) +rescale.max <- function(my.array,val.max){ + ss <- which(my.array > val.max) + my.array[ss] <- val.max - 0.000000001 + return(my.array) +} + +rescale.min <- function(my.array,val.min){ # come rescale.max ma per i valori piu'piccoli di val.min che vengono cambiati a val.min + ss <- which(my.array < val.min) + my.array[ss] <- val.min + 0.0000000001 + return(my.array) +} + +rescale <- function(my.array,val.min,val.max){ # unisce rescale.max con rescale.min + ss <- which(my.array > val.max) + my.array[ss] <- val.max - 0.0000000001 + ss <- which(my.array < val.min) + my.array[ss] <- val.min + 0.0000000001 # the 0.0000000001 is just to be able to draw a color with PlotEquiMap (otherwise draw the color for NA) + return(my.array) +} + +ColorBarV <- function(brks, cols = NULL, vert = TRUE, subsampleg = 1, + cex = 1, marg=NULL) { + # Creates a horizontal or vertical colorbar to introduce in multipanels. + # + # Args: + # brks: Levels. + # cols: List of colours, optional. + # vert: TRUE/FALSE for vertical/horizontal colorbar. + # kharin: Supsampling factor of the interval between ticks on colorbar. + # Default: 1 = every level + # cex: Multiplicative factor to increase the ticks size, optional. + # marg: margins + # + # Returns: + # This function returns nothing + # + # History: + # 1.0 # 2012-04 (V. Guemas, vguemas@ic3.cat) # Original code + # 1.1 # 2014-11 (C. 
+  #              Prodhomme, chloe.prodhomme@ic3.cat)
+  #              add cex option
+  #
+  #
+  # Input arguments
+  # ~~~~~~~~~~~~~~~~~
+  #
+  if (is.null(cols) == TRUE) {
+    nlev <- length(brks) - 1
+    cols <- rainbow(nlev)
+  } else {
+    if (length(cols) != (length(brks) - 1)) {
+      stop("Inconsistent colour levels / list of colours")
+    }
+  }
+
+  #
+  # Plotting colorbar
+  # ~~~~~~~~~~~~~~~~~~~
+  #
+  if (vert) {
+    if (is.null(marg) == FALSE){
+      par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2)
+    }else{
+      par(mar = c(1, 1, 1, 1.5 *( 1 + cex)), mgp = c(1, 1, 0), las = 1, cex = 1.2)
+    }
+    image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, col = cols,
+          xlab = '', ylab = '')
+    box()
+    axis(4, at = seq(0.5, length(brks) - 0.5, subsampleg), tick = TRUE,
+         labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex)
+  } else {
+    if (is.null(marg) == FALSE){  # (the original tested 'if (marg)', which fails when marg is NULL)
+      par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2)
+    }else{
+      par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3,0.8*(cex-0.625))), 0),
+          las = 1, cex = 1.2)
+    }
+
+    image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols,
+          xlab = '', ylab = '')
+    box()
+    axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg),
+         labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex)
+  }
+}
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: Remove the grid points above a certain value (argument 'level') [and below '-level' if two.sides=TRUE] that happen to lie in areas with few points above that value.
+# Useful to remove from a contour plot all the small spots of significant points that we don't want to contour.
+# To do so, just apply this function inside the option 'contour' of 'PlotEquiMap' to remove the significant points (they are set to the value of 0).
+# Argument 'size' determines the side of the square (in grid points) used to find whether or not there are enough grid points with values above 'level' near
+# the chosen point. Increasing it will increase the number of grid points deleted, leaving only the bigger spots of points above the chosen value.
+# I/O: a 2D lat/lon grid in geographic coordinates +# Assumptions: none +# Branch: general +# Example: +# data <- matrix(runif(48000,0,1)^2,300,160) + matrix(c(rep(0,20000),rep(0.6,3000),rep(0,25000)),300,160) +# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred")) +# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=data, brks2=0.6) +# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE) +# PlotEquiMap_colored(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE, contours.col="blue", continents.col="gray40") + +grid2contour <- function(grid, level, two.sides=FALSE, size=10){ + nrows <- dim(grid)[1] + ncols <- dim(grid)[2] + radius <- round(size/2) + + grid.weighted <- matrix(NA, nrows, ncols) + + grid.expanded <- rbind(cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]),cbind(grid,grid,grid),cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)])) + + if(two.sides==FALSE){ + for(i in 1:nrows){ + for(j in 1:ncols){ + grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level) + } + } + } else { + for(i in 1:nrows){ + for(j in 1:ncols){ + grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level) + sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] < -level) + } + } + } + + n.points.min <- (2*radius+1)^2*0.3 # 30% of the total points in the square + ss <- which(grid.weighted < n.points.min) + grid[ss] <- 0 + return(grid) +} + +###################################################################### +# +# RELIABILITY DIAGRAM FOR A COLLECTION OF PROBABILITY FORECASTS # +# Veronica: Like ReliabilityDiagram() function, but +# Modified to include in the outputs the hist.counts +# +###################################################################### + +ReliabilityDiagramHist <- function(probs, obs, bins=10, nboot=500, + plot=FALSE, plot.refin=TRUE, mc.cores=1, + cons.probs=c(0.025, 0.975)) + { + #print("RD") + # + # Plot reliability diagram for a probability forecast + # + # Usage: ReliabilityDiagram(probs, obs, nbins, nboot) + # + # Arguments: + # + # probs ... vector of length N, probs[k] has the predicted probability for + # the event obs[k] + # obs ... obs[k] = 1 if the event happened at instance k, obs[k] = 0 + # otherwise + # bins ... either scalar: number of equidistant bins to discretize the + # forecast probabilities, + # or a vector: user-defined breakpoints of the bins; the `hist` + # function will produce errors if these are not valid + # nboot ... number of bootstrap resamples for estimating consistency bars + # if nboot==0, no resampling is done and NAs are returned as + # consistency bars + # plot ... boolean; whether to plot the reliability diagram + # plot.refin ... boolean; whether to plot the small refinement histogram + # in lower right corner + # cons.probs ... a 2-vector, lower and upper confidence limit + # mc.cores ... 
number of cores for resampling (if > 1, library `multicore` + # is required) + # + # Return value: + # + # a data frame of K+1 rows with the following columns: + # + # * p.avgs ... in-bin averages of the forecast probabilities + # * cond.probs ... observed conditional frequency of event, given i + # * cbar.lo ... lower limit consistency of consistency bar[i], as specified by user + # * cbar.hi ... upper limit consistency of consistency bar[i], as specified by user + # + # Author: + # + # Stefan Siegert + # s.siegert@exeter.ac.uk + # December 2013 + # + # Example: + # + # N <- 1000 + # p <- rbeta(N, 1, 3) + # y <- rbinom(N, 1, p) + # rd <- rel.diag(p, y, plot=TRUE) + # print(rd) + # + # + # change log: + # + # 2013/12/02 + # * manual definition of bin-breaks + # * manual definition of consistency intervals + # * sanity checks + # * multicore option for resampling + # + # 2013/10/31: + # * return summary data as data frame + # * added options `plot` and `plot.refin` + # + # 2013/08/20: + # * points are plotted at in-bin-averages, not at bin centres + # * legend has been removed + # * consistency bars have been added, calculated by a resampling technique + # * see Broecker (2007) http://dx.doi.org/10.1175/WAF993.1 for details + # * the bars are pointwise 2.5% ... 97.5% intervals around the hypothesis of reliability + # * dependency on package "verification" was removed + # + # Author: Stefan Siegert + # + # based on previous version by Caio Coelho and the routine + # reliability.plot.default of the R-package `verification` + # + + + # sanity checks + if (class(probs) == "data.frame") { + probs <- c(as.matrix(probs)) + } + if (class(obs) == "data.frame") { + obs <- c(as.matrix(obs)) + } + stopifnot(length(probs) == length(obs)) + stopifnot(nboot >= 0, mc.cores >= 0) + stopifnot(all(probs >= 0), all(probs <= 1), all(obs %in% c(0,1))) + stopifnot(length(cons.probs) == 2, all(cons.probs >= 0), all(cons.probs <= 1)) + # optional use of multicore without warning message + warn <- getOption("warn") + options(warn=-1) + if(require(multicore, quietly=TRUE)) { + mclapply <- multicore::mclapply + } else { + mclapply <- function(..., mc.cores) lapply(...) 
+ } + options(warn=warn) + + # some definitions and corrections + n <- length(obs) + mc.cores <- floor(mc.cores) + nboot <- floor(nboot) + cons.probs <- sort(cons.probs) + + + ############################################# + # reliability analysis + ############################################# + # estimate refinement function + if (length(bins) == 1) { + nbins <- floor(bins) + brx <- seq(0, 1, length.out=nbins+1) + + c(-.1, rep(0, nbins-1), .1) + } else { + nbins <- length(bins) - 1 + bins <- sort(bins) + stopifnot(min(bins)<= 0 & max(bins) >= 1) + brx <- bins + } + h <- hist(probs, breaks=brx, plot=FALSE)$counts +#print(h) +#print(sum(h)) +#print(probs) +#print(sum(probs)) + p <- sum(probs) + #print(sum(probs)/sum(h)) + + # estimate calibration function + g <- hist(probs[obs==1], breaks=brx, plot=FALSE)$counts +#print(g) +#print(sum(g)) + obar.i <- g / h +#print(obar.i) + no_res <- sum(g)/sum(h) + #print(no_res) + obar.i[ is.nan(obar.i) ] <- NA + + # calculate in-bin averages + p.bins <- as.numeric(cut(probs, breaks=brx, include.lowest=TRUE)) + p.avgs <- sapply(seq(nbins), + function(ii) mean(probs[p.bins == ii], na.rm=TRUE)) + p.avgs[ is.nan(p.avgs) ] <- NA + +# +#print(p.avgs) +# vertline <- sum(p.avgs,na.rm = TRUE)/bins +#print(vertline) + + ############################################# + # consistency resampling (broecker and smith 2007) + ############################################# + if (nboot) { + resamp.mat <- matrix(nrow=0, ncol=nbins) + # the resampling function + sample.rel.diag <- function(dummy=0) { + p.hat <- sample(x=probs, size=n, replace=TRUE) + x.hat <- rbinom(n=n, size=1, prob=p.hat) + hh <- hist(p.hat, breaks=brx, plot=FALSE)$counts + gg <- hist(p.hat[x.hat==1], breaks=brx, plot=FALSE)$counts + return(gg / hh) + } + # multicore? 
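+    # the nboot resampling draws run on 'mc.cores' workers when mc.cores > 1,
+    # otherwise serially via replicate():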
+ if (mc.cores > 1) { + l <- mclapply(1:nboot, sample.rel.diag, mc.cores=mc.cores) + resamp.mat <- do.call(rbind, l) + } else { + l <- replicate(nboot, sample.rel.diag()) + resamp.mat <- t(l) + } + cons.bars <- apply(resamp.mat, 2, + function(z) quantile(z, cons.probs, na.rm=TRUE)) + } else { + cons.bars <- matrix(NA, ncol=nbins, nrow=2) + } + + + ############################################# + # plot the reliability diagram + ############################################# + if (plot) { + # reliability plot + old.par <- par(no.readonly = TRUE) + on.exit(par(old.par)) + plot(NULL, xlim = c(0,1), ylim = c(0,1), + xlab="Forecast probability", + ylab="Observed relative frequency") + # consistency bars + for (i in 1:length(p.avgs)) { + lines(rep(p.avgs[i], 2), cons.bars[, i], col="#CCCCCC", lwd=6) + } + # reliability points and diagonal + points(p.avgs, obar.i, col = "black", pch = 1, lwd=2, type="b") + lines(c(0,1), c(0,1), lty=1) + if (plot.refin) { + # refinement histogram in lower corner + pp<- par("plt") + par("plt" = c(pp[2] - 0.2 , pp[2], pp[3], pp[3]+ 0.2) ) + par(new = TRUE) + barplot(h, axes = FALSE, axisnames = FALSE) + axis(4) + box() + } + } + + ############################################# + # return data + ############################################# + ret.df <- data.frame(p.avgs=p.avgs, cond.probs=obar.i, + cbar.lo=cons.bars[1,], cbar.hi=cons.bars[2,],hist.counts=h, obs.counts=g, for.prob=p) + return(ret.df) + } + + +# to plot the reliability diagrams for both upper and below tercile at the same time: +ReliabilityDiagram2 <-function(rel_diag,nbins=10,consbars=F,tit=NULL,colLine=NULL,colBar=NULL,marHist=T,hist_ylim=NULL,Lg=NULL) { + + # print("Plot") +# rel_diag<-rd # output of ReliabilityDiagramHist() +# nbins=10 +# consbars=T +# colLine=col_line +# colBar=col_bar +# tit=tit1 +# marHist=T +# hist_ylim=c(0,100) +# x11(width=12,height=10) + # x11() + # PLOT OF THE RELIABILITY DIAGRAM + # + ###################################################################################### + # rd: a list with the reliability diagrams that will be represented in the same plot + # cons.bars : if the consistency bar must be represented or not. 
+  # nbins : number of equidistant points used to compute the reliability diagram (optional)
+  # tit: the title of the plot (optional)
+  # brierScores: the Brier score linked to the reliability diagram (optional)
+  # marHist: whether to show the small refinement histograms
+  #####################################################################################
+
+  # Some parameters are defined
+  nrd <- length(rel_diag) # nrd = 5: 4 models + mme
+  rg <- list()
+
+  # Check the dimensions of each reliability diagram
+  for (i in 1:nrd){
+    if (dim(rel_diag[[i]])[1]!=nbins){
+      stop('The number of bins of the reliability diagram must be equal to nbins')
+    }
+    rg[[i]]<-range(rel_diag[[i]]$hist.counts) # check the range of the histograms
+  }
+
+  if (is.null(hist_ylim)){
+    rgH<-range(rg)
+#print(rgH)
+  }else{
+    rgH<-hist_ylim
+  }
+
+
+  ##########################################
+  # reliability plot
+  # par(mar=c(5,3,2,2)+0.1)
+  ##########################################
+
+  layout(matrix(c(rep(1,nrd),seq(2,(nrd+1))),nrd,2,byrow=F),width=c(5,2))
+  par(oma=c(2.5,4,5,1))
+  #layout.show(a)
+
+  # The axes are defined
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  #x11(width=12,height=10)
+# old.par <- par(no.readonly = TRUE)
+#print(old.par)
+# on.exit(par(old.par))
+# par(mar=c(5,5,5,0))
+  old.par <- par(mar=c(5,5,5,0))
+  on.exit(par(old.par))
+
+  plot(NULL, xlim = c(0,1), ylim = c(0,1),axes=F, xlab='', ylab='')
+
+  axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=2.0)
+  title(xlab= "Forecast probability",line=3.9,cex.lab=2.0)
+
+  axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), las=2,cex.axis=2.0)
+  #axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), cex.axis=2.0)
+  box()
+  title(ylab= "Observed relative frequency", line=0.2,cex.lab=2.0,outer=T)
+  if(is.null(tit)==F){
+# title(tit,cex.main=4,outer=T,line=-1)
+# title(tit,cex.main=2.0,outer=T,line=-4)
+    title(tit,cex.main=2.0,outer=T,line=-3)
+  }
+
+  # Legend
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  yloc <- c(1.0, 0.95, 0.90, 0.85, 0.80)
+  legend(0.,yloc[1], legend=Lg[[1]], fill=colLine[[1]], bty="n", cex=1.2)
+  legend(0.,yloc[2], legend=Lg[[2]], fill=colLine[[2]], bty="n", cex=1.2)
+  legend(0.,yloc[3], legend=Lg[[3]], fill=colLine[[3]], bty="n", cex=1.2)
+  legend(0.,yloc[4], legend=Lg[[4]], fill=colLine[[4]], bty="n", cex=1.2)
+  legend(0.,yloc[5], legend=Lg[[5]], fill=colLine[[5]], bty="n", cex=1.2)
+# legend("topleft", "(x,y)", pch = 1, title = "topleft, inset = .05", inset = .05)
+
+  # No resolution and No skill lines
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  no_res <- sum(rel_diag[[1]]$obs.counts)/sum(rel_diag[[1]]$hist.counts)
+# vt_res <- sum(rel_diag[[1]]$for.prob)/sum(rel_diag[[1]]$hist.counts)
+#print(paste("no_res = ",no_res))
+  numb <- c(seq(0,1,by=0.1))
+#print(numb)
+  no_skill <- (numb+no_res)/2.
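+  # the no-skill line lies halfway between the diagonal and the no-resolution line;
+  # points between the two still improve on the climatological Brier score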
+#print(no_skill)
+
+# diagonal line
+  lines(c(0,1), c(0,1), lty=1)
+# no_resolution line
+  lines(c(0,1), c(no_res,no_res), col="gray", lty=3)
+  lines(c(1/3,1/3), c(0,1), col="gray", lty=3)
+# lines(c(vt_res,vt_res), c(0,1), col="gray", lty=3)
+# lines(c(no_res,no_res), c(0,1), col="gray", lty=3)
+# no_skill line
+  lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3)
+
+
+  # Consistency bars
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  HI <- matrix(NA, nrow=nrd, ncol=length(rel_diag[[1]]$hist.counts))
+
+  for (j in 1:nrd){ # nrd = 5: 4 models + mme
+    HI[j,] <- rel_diag[[j]]$hist.counts
+    if (consbars==T){
+      # The lower limit of consistency bar i and the upper limit are combined in one list
+      consBars<-list()
+      consBars[[j]]<-abind(InsertDim(rel_diag[[j]]$cbar.lo,1,1),InsertDim(rel_diag[[j]]$cbar.hi,1,1),along=1)
+
+      # plot consistency bars
+      for (i in 1:nbins){ # nbins = 10 bins
+# lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=3)
+        lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=2) # lwd: line width
+      }
+    }
+
+    # type="b" in plot(): draw both the points and the connecting lines (see ?plot for the other type codes)
+    points(rel_diag[[j]]$p.avgs, rel_diag[[j]]$cond.probs, type="b", pch=1 , col =colLine[[j]], cex=2.0 , lwd=3)
+
+  }
+
+
+  # Number of forecasts
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  if (marHist==TRUE){
+
+    for (i in 1:nrd){
+# par(mar=c(5,0,5,12))
+# par(mar=c(1.5,1.5,6,7)) # in case the number of sharpness diagrams is 3 or 4
+      par(mar=c(0.5,1.5,5,7)) # in case the number of sharpness diagrams is 5
+      barplot(HI[i,]/10000, beside=T,space=c(0,1.2),axes = F, axis.lty=F, axisnames = F, col = colLine[[i]], ylim=rgH/10000)
+# axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=1.5)
+      title(main = "# of forecasts (x10⁴)", font.main = 1.0, line=0.5)
+# grid(1,5,col='#525252')
+      axis(4,cex.axis=1.0)
+      box(bg='grey')
+    }
+    #pp<- par("plt")
+    #par("plt" = c(pp[2] - 0.14 , pp[2], pp[3], pp[3]+ 0.15) )
+    #par(new = TRUE)
+  }
+
+}
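+
+# Example of use (an illustrative sketch, not part of the original code; ReliabilityDiagram2()
+# also needs the 'abind' package and InsertDim() from s2dverification, and expects 5 diagrams):
+#
+# N <- 1000
+# probs <- rbeta(N, 1, 3)                          # forecast probabilities
+# event <- rbinom(N, 1, probs)                     # binary observations
+# rd <- ReliabilityDiagramHist(probs, event, bins=10, nboot=500)
+# rd.list <- list(rd, rd, rd, rd, rd)              # normally: 4 models + the multi-model mean
+# ReliabilityDiagram2(rd.list, nbins=10, consbars=TRUE,
+#                     colLine=as.list(1:5), colBar=1:5,
+#                     Lg=as.list(paste("model", 1:5)))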
+
+#################################################################################################
+#                                       Interpolation                                           #
+#################################################################################################
+
+# function that returns the position of the nearest grid point to a point of coordinates lat, lon;
+# the grid is represented by two vectors lat.grid and lon.grid:
+# if there is more than one point at the same minimum distance, it returns only the position of the first one
+nearest <- function(lat,lon,lat.grid,lon.grid){
+  n.lat=length(lat.grid)
+  n.lon=length(lon.grid)
+
+  if(lon<0 && min(lon.grid>=0)) lon=360+lon # convert the negative longitude lon of the point to a positive one, if lon.grid has only positive values
+
+  #lat.grid2<-sort(lat.grid,decreasing=T) # sort latitudes because they must go from the higher number to the lower one
+  #lon.grid2<-sort(lon.grid) # sort longitudes because they must go from the lower number to the higher one
+
+  grid.dist.lat<-matrix((lat.grid-lat)^2, nrow=n.lat, ncol=n.lon+1, byrow=FALSE)
+  #grid.dist.lon<-matrix((lon.grid-lon)^2,nrow=n.lat,ncol=n.lon,byrow=TRUE)
+  grid.dist.lon<-matrix((c(lon.grid,lon.grid[1]+360)-lon)^2,nrow=n.lat,ncol=n.lon+1,byrow=TRUE) # add also the first lon point at the end of the lon values
+
+  grid.dist<-(grid.dist.lat+grid.dist.lon)^0.5 # matrix of distances from the point, in degrees
+  nearest.grid.point.pos<-which(grid.dist==min(grid.dist),arr.ind=T)
+
+  if(length(nearest.grid.point.pos)>2) nearest.grid.point.pos <- nearest.grid.point.pos[1,] # remove all points at the same minimum distance beyond the first one
+
+  if(nearest.grid.point.pos[2] > n.lon) nearest.grid.point.pos[2] <- 1 # in case the closest point is the one with the first longitude value, the lon position must be changed
+
+  return(nearest.grid.point.pos)
+}
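+
+# Example of use (an illustrative sketch; the coordinates below are arbitrary):
+#
+# lat.grid <- seq(90, -90, -2.5)
+# lon.grid <- seq(0, 357.5, 2.5)
+# pos <- nearest(41.4, 2.2, lat.grid, lon.grid)  # c(lat position, lon position)
+# lat.grid[pos[1]]; lon.grid[pos[2]]             # coordinates of the nearest grid point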
+
+
+# interpolates the observed values in grid2 on grid1 with the bilinear method, but using the great-circle distances (Haversine formula) and selecting only the 4 nearest grid points.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees. Grid2 format must be: [lat,lon]
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# It interpolates at least 2 grids at a time (i.e: the monthly grids of a model), so it can use the same distance matrix for all grids (the most computationally intensive part of the script)
+# and the same weights for each monthly grid; grid2 must have the format: [layer,lat,lon], where 'layer' stands for day, month, year, season, etc...
+# beware that the 2 rows of points closest to the north and south poles are still not well interpolated, because they are not yet associated with grid points on the other side of the poles
+bilinear<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2){
+  R<-6371 # Earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1) # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  nearest.lat<-array(NA,c(n.lat1,2)) # latitude of the two closest grid points
+  nearest.lat.pos<-array(NA,c(n.lat1,2)) # position inside grid2 of the two closest points in the latitudinal sense
+  for(l in 1:n.lat1){
+    pos.first<-which.min(abs(lat2.list-lat1.list[l]))
+    lat2.list.temp<-lat2.list
+    lat2.list.temp[pos.first]=10000000000
+    pos.second<-which.min(abs(lat2.list.temp-lat1.list[l]))
+    nearest.lat[l,1]=lat2.list[pos.first]
+    nearest.lat[l,2]=lat2.list[pos.second]
+    nearest.lat.pos[l,1]=pos.first
+    nearest.lat.pos[l,2]=pos.second
+  }
+
+  # correct longitude values if one of the two grids has positive longitude values only and the other also negative ones:
+  if(length(which(lon1.list<0))>0 && length(which(lon2.list<0))==0){lon2.list[which(lon2.list>180)]<-lon2.list[which(lon2.list>180)]-360}
+  if(length(which(lon1.list<0))==0 && length(which(lon2.list<0))>0){lon2.list[which(lon2.list<0)]<-lon2.list[which(lon2.list<0)]+360}
+
+  nearest.lon<-array(NA,c(n.lon1,2)) # longitude of the two closest grid points
+  nearest.lon.pos<-array(NA,c(n.lon1,2)) # position inside grid2 of the two closest points in the longitudinal sense
+  for(l in 1:n.lon1){
+    pos.first<-which.min(abs(lon2.list-lon1.list[l]))
+    lon2.list.temp<-lon2.list
+    lon2.list.temp[pos.first]=10000000000
+    pos.second<-which.min(abs(lon2.list.temp-lon1.list[l]))
+    # points of grid1 to the left of all points of grid2 are associated also to the rightmost points of grid2, because longitudes wrap around the globe:
+    if(lon1.list[l] < lon2.list[pos.first] && lon1.list[l] < lon2.list[pos.second]) pos.second<-n.lon2
+    # points of grid1 to the right of all points of grid2 are associated also to the leftmost points of grid2, because longitudes wrap around the globe:
+    if(lon1.list[l] > lon2.list[pos.first] && lon1.list[l] > lon2.list[pos.second]) pos.second<-1
+
+    nearest.lon[l,1]=lon2.list[pos.first]
+    nearest.lon[l,2]=lon2.list[pos.second]
+    nearest.lon.pos[l,1]=pos.first
+    nearest.lon.pos[l,2]=pos.second
+  }
+
+
+  lat1.list.rad<-deg2rad(lat1.list)
+  lon1.list.rad<-deg2rad(lon1.list)
+  nearest.lat.rad<-deg2rad(nearest.lat)
+  nearest.lon.rad<-deg2rad(nearest.lon)
+
+  pred<-array(NA,c(n.layers,n.lat1,n.lon1))
+  for(y in 1:n.lat1){
+    for(x in 1:n.lon1){
+
+      #lat.deg<-lat1.list[y] # lat and lon of the selected grid1 point
+      #lon.deg<-lon1.list[x]
+      #lat1.deg<-nearest.lat[y,1] # lat and lon of its 4 nearest points
+      #lon1.deg<-nearest.lon[x,1]
+      #lat2.deg<-nearest.lat[y,1]
+      #lon2.deg<-nearest.lon[x,2]
+      #lat3.deg<-nearest.lat[y,2]
+      #lon3.deg<-nearest.lon[x,1]
+      #lat4.deg<-nearest.lat[y,2]
+      #lon4.deg<-nearest.lon[x,2]
+      #latN.deg<-c(lat1.deg,lat2.deg,lat3.deg,lat4.deg)
+      #lonN.deg<-c(lon1.deg,lon2.deg,lon3.deg,lon4.deg)
+
+      # conversion to rad:
+      lat<-lat1.list.rad[y] # lat and lon of the selected grid1 point
+      lon<-lon1.list.rad[x]
+      lat1<-nearest.lat.rad[y,1] # lat and lon of its 4 nearest points
+      lon1<-nearest.lon.rad[x,1]
+      lat2<-nearest.lat.rad[y,1]
+      lon2<-nearest.lon.rad[x,2]
+      lat3<-nearest.lat.rad[y,2]
+      lon3<-nearest.lon.rad[x,1]
+      lat4<-nearest.lat.rad[y,2]
+      lon4<-nearest.lon.rad[x,2]
+      latN<-c(lat1,lat2,lat3,lat4)
+      lonN<-c(lon1,lon2,lon3,lon4)
+
+      distN<-sqrt((sin((lat-latN)/2))^2 + cos(lat)*cos(latN)*(sin((lon-lonN)/2))^2)
+      distN<-Rx2 * asin(pmin(distN,1)) # distance in km of the 4 nearest points
+
+      ss<-which(distN==0)
+      weights<-distN^2
+      weights[ss]<-0.000000000001 # to avoid +Inf in the denominator of the weight matrix
+      weights=1/weights
+      sum.weights<-sum(weights)
+
+      for(l in 1:n.layers){
+        value1<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,1]] # grid2 value of its 4 nearest points
+        value2<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,2]] # for layer l
+        value3<-grid2[l,nearest.lat.pos[y,2],nearest.lon.pos[x,1]]
+        value4<-grid2[l,nearest.lat.pos[y,2],nearest.lon.pos[x,2]]
+        valueN<-c(value1,value2,value3,value4)
+
+        values.weighted<-valueN * weights
+        sum.values.weighted<-sum(values.weighted)
+        pred[l,y,x]<-sum.values.weighted/sum.weights
+        if(sum.weights==0)pred[l,y,x]=NA # if the 4 points are so far away that they carry no weight; but I think this never happens with this method
+      }
+    }
+  }
+
+  # R bug: the values of 'interp' are not correct, so the array 'pred' is returned directly!
+  #interp<-list()
+  #for(l in 1:n.layers) interp[[l]]<-pred[l,,]
+  #return(interp)
+  return(pred)
+}
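+
+# Example of use (an illustrative sketch; grids and values are random placeholders,
+# and deg2rad() must be available as in the rest of this file):
+#
+# lat1 <- seq(50, 40, -1); lon1 <- seq(0, 10, 1)       # target grid
+# lat2 <- seq(50, 40, -2.5); lon2 <- seq(0, 10, 2.5)   # source grid
+# grid2 <- array(rnorm(12*length(lat2)*length(lon2)), c(12, length(lat2), length(lon2))) # 12 monthly layers
+# interp <- bilinear(lat1, lon1, lat2, lon2, grid2)    # returns an array [layer, lat, lon]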
+
+
+# interpolates the observed values in grid2 on grid1, assigning to each point of grid1 the value of the closest point of grid2.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees. Grid2 format must be: [lat,lon]
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# It interpolates at least 2 grids at a time (i.e: the monthly grids of a model), so it can use the same distance matrix for all grids (the most computationally intensive part of the script)
+# and the same weights for each monthly grid; grid2 must have the format: [layer,lat,lon], where 'layer' stands for day, month, year, season, etc...
+# beware that it does not work well at lon=0 and it doesn't compute the distances along great circles!!!
+closer<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2){
+  R<-6371 # Earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1) # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  nearest.lat<-array(NA,c(n.lat1,2)) # latitude of the closest grid point
+  nearest.lat.pos<-array(NA,c(n.lat1,2)) # position inside grid2 of the closest point in the latitudinal sense
+  for(l in 1:n.lat1){
+    pos.first<-which.min(abs(lat2.list-lat1.list[l]))
+    lat2.list.temp<-lat2.list
+    nearest.lat[l,1]=lat2.list[pos.first]
+    nearest.lat.pos[l,1]=pos.first
+  }
+
+  # correct longitude values if one of the two grids has positive longitude values only and the other also negative ones:
+  if(length(which(lon1.list<0))>0 && length(which(lon2.list<0))==0){lon2.list[which(lon2.list>180)]<-lon2.list[which(lon2.list>180)]-360}
+  if(length(which(lon1.list<0))==0 && length(which(lon2.list<0))>0){lon2.list[which(lon2.list<0)]<-lon2.list[which(lon2.list<0)]+360}
+
+  nearest.lon<-array(NA,c(n.lon1,2)) # longitude of the closest grid point
+  nearest.lon.pos<-array(NA,c(n.lon1,2)) # position inside grid2 of the closest point in the longitudinal sense
+  for(l in 1:n.lon1){
+    pos.first<-which.min(abs(lon2.list-lon1.list[l]))
+    lon2.list.temp<-lon2.list
+    nearest.lon[l,1]=lon2.list[pos.first]
+    nearest.lon.pos[l,1]=pos.first
+  }
+
+
+  #lat1.list.rad<-deg2rad(lat1.list)
+  #lon1.list.rad<-deg2rad(lon1.list)
+  #nearest.lat.rad<-deg2rad(nearest.lat)
+  #nearest.lon.rad<-deg2rad(nearest.lon)
+
+  pred<-array(NA,c(n.layers,n.lat1,n.lon1))
+  for(y in 1:n.lat1){
+    for(x in 1:n.lon1){
+      #lat<--lat1.list.rad[y] # lat and lon of the selected grid1 point
+      #lon<--lon1.list.rad[x]
+
+      #latN<-nearest.lat.rad[y,1] # lat and lon of its nearest point
+      #lonN<-nearest.lon.rad[x,1]
+
+      #distN<-sqrt((sin((lat-latN)/2))^2 + cos(lat)*cos(latN)*(sin((lon-lonN)/2))^2)
+      #distN<-Rx2 * asin(pmin(distN,1)) # distance in km of the nearest point
+
+      for(l in 1:n.layers){
+        pred[l,y,x]<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,1]] # grid2 value of its nearest point for layer l
+      }
+    }
+  }
+
+  interp<-list()
+  for(l in 1:n.layers) interp[[l]]<-pred[l,,]
+  return(interp)
+
+}
+
+
+
+# interpolates the observed values in grid2 on grid1 with the IDW method, using the great-circle distances (Haversine formula) and selecting only the grid points closer than 'dmax' km.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees. Grid2 format must be: [lat,lon]
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# it returns NA if for a given point of grid1 there are no points of grid2 closer than 'dmax' km that can be used to interpolate that point.
+# It interpolates at least 2 grids at a time (i.e: the monthly grids of a model), so it can use the same distance matrix for all grids (the most computationally intensive part of the IDW)
+# and the same weights for each monthly grid; grid2 must have the format: [layer,lat,lon], where 'layer' stands for day, month, year, season, etc...
+# This function is fully vectorized, allowing the fastest possible interpolation of grids with any number of points. It uses up to ~8 GB of RAM, but its memory usage can
+# be reduced by setting a lower value of the variable max.memory defined below.
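+#
+# Example of use (an illustrative sketch; grids as in the bilinear() example above, with a
+# hypothetical search radius of 300 km):
+#
+# interp <- multi.idw(lat1, lon1, lat2, lon2, grid2, dmax=300) # interp[[l]] is the l-th interpolated layer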
+multi.idw<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2,dmax){ + max.memory<-125000000 # maximum number of elements in a matrix (each element is a double and occupies 8 byte; the maximum size of a matrix in R is 2.1 GB, about 250000000 elements of type double) + R<-6371 # earth mean radius (km) + Rx2<-2*R + + n.lat1=length(lat1.list) + n.lon1=length(lon1.list) + n.lat2=length(lat2.list) + n.lon2=length(lon2.list) + n.points1<-as.double(n.lat1*n.lon1) # number of points of grid1 + n.points2<-as.double(n.lat2*n.lon2) + n.points.tot<-n.points1*n.points2 + n.layers<-dim(grid2)[1] + + # longitude conversion: + #if(min(lon1)<0 && min(lon2)>=0) lon1=360+lon1 # convert the negative longitude of the first grid to a positive one, if the second grid has only positive longitudes + #if(min(lon2)<0 && min(lon1)>=0) lon2=360+lon2 # convert the negative longitude of the second grid to a positive one, if the first grid has only positive longitudes + + #lat.grid1<-sort(lat.grid1,decreasing=T) # sort latitudes because they must go from the higher number to the lower one to simulate a spatial grid + #lon1<-sort(lon1) # sort longitudes because they must go from the lower number to the highest one to similuate a spatial grid + + #lat.grid2<-sort(lat.grid2,decreasing=T) # sort latitudes because they must go from the higher number to the lower one to simulate a spatial grid + #lon2<-sort(lon2) # sort longitudes because they must go from the lower number to the highest one to similuate a spatial grid + + n.max1<-floor(max.memory/n.points2) # maximum number of points of grid1 that can be used in a matrix of ~1 GB (1 point = 1 double = 8 bytes) + if(n.points1<=n.max1) { + n.int<-1 + n.points1.int<-n.points1 + n.points1.last.int<-n.points1 + print("grid fits into memory") + } else { # in this case, cut grid1 horizontally in smaller grids to work with matrices of size of ~1 GB + n.int<-floor(n.points1/n.max1)+1 + n.points1.int<-n.max1 + n.points1.last.int<-n.points1 %% n.max1 # number of points of the last interval + print(paste("grid will be split in",n.int,"subgrid")) + } + + lat1<-rep(lat1.list,each=n.lon1) + lon1<-rep(lon1.list,n.lat1) + lat1.rad<-deg2rad(lat1) + lon1.rad<-deg2rad(lon1) + cos.lat1<-cos(lat1.rad) + vert1<-cbind(lat=lat1,lon=lon1,lat.deg=lat1.rad,lon.deg=lon1.rad,cos.lat=cos.lat1) # list of lat and lon of points of grid1 with the radial values and cosinus too + + lat2<-rep(lat2.list,each=n.lon2) + lon2<-rep(lon2.list,n.lat2) + lat2.rad<-deg2rad(lat2) + lon2.rad<-deg2rad(lon2) + cos.lat2<-cos(lat2.rad) + vert2<-cbind(lat=lat2,lon=lon2,lat.deg=lat2.rad,lon.deg=lon2.rad,cos.lat=cos.lat2) # list of lat and lon of points of grid2 with the radial values and cosinus too + + pred<-matrix(NA,nrow=n.points1,ncol=n.layers) # matrix with the interpolated values for each layer + + for(i in 1:n.int){ + imax<-i*n.max1 + if(i==n.int)imax=n.points1 + imin<-1+(i-1)*n.max1 + if(i==n.int && n.int>1)n.points1.int=n.points1.last.int + + lat1.rad.int<-lat1.rad[imin:imax] + lat2.rad.int<-lat2.rad + lon1.rad.int<-lon1.rad[imin:imax] + lon2.rad.int<-lon2.rad + cos.lat1.int<-cos.lat1[imin:imax] + cos.lat2.int<-cos.lat2 + + print("Calculating distance matrix...") + + vert.cos.lat1<-matrix(cos.lat1.int,nrow=n.points1.int,ncol=n.points2) + vert.cos.lat2<-matrix(cos.lat2.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE) + vert.cos.lat = vert.cos.lat1 * vert.cos.lat2 + rm(vert.cos.lat1,vert.cos.lat2);gc() + + vert.lon1<-matrix(lon1.rad.int,nrow=n.points1.int,ncol=n.points2) + 
vert.lon2<-matrix(lon2.rad.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.lon <- (vert.lon2 - vert.lon1)/2
+    rm(vert.lon1,vert.lon2);gc()
+
+    vert.lon.squared<-vert.lon^2
+    rm(vert.lon);gc()
+
+    vert.mult<-vert.cos.lat * vert.lon.squared
+    rm(vert.cos.lat,vert.lon.squared);gc()
+
+    vert.lat1<-matrix(lat1.rad.int,nrow=n.points1.int,ncol=n.points2)
+    vert.lat2<-matrix(lat2.rad.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.lat <- (vert.lat2 - vert.lat1)/2
+    rm(vert.lat1,vert.lat2);gc()
+
+    vert.lat.squared <- vert.lat^2
+    rm(vert.lat);gc()
+
+    vert.latlon <- vert.lat.squared + vert.mult
+    rm(vert.lat.squared,vert.mult);gc()
+
+    vert.root<-sqrt(vert.latlon)
+    rm(vert.latlon);gc()
+
+    mm<-which(vert.root > 1)
+    if(length(mm)>0) vert.root[mm]<-1
+    rm(mm);gc()
+
+    vert.dist <- Rx2 * asin(vert.root) # matrix of distances in km
+    rm(vert.root);gc()
+
+    ss<-which(vert.dist==0)
+    weights<-vert.dist^2
+    weights[ss]<-0.000000000001 # to avoid +Inf in the denominator of the weight matrix
+    weights=1/weights
+    rm(ss);gc()
+
+    nn<-which(vert.dist > dmax)
+    weights[nn]<-0 # set to 0 the weights of the grid2 points that are too far away to be used in the IDW
+    sum.weights<-rowSums(weights)
+    zz<-which(sum.weights==0)
+    rm(vert.dist,nn);gc()
+
+    for(l in 1:n.layers){
+      # put the grid2 values in each row of a matrix following the lat/lon list of grid2 points, but horizontally:
+      horiz2<-as.vector(t(grid2[l,,]))
+      horiz2.rep<-rep(horiz2,n.points1.int)
+
+      vert2.values.int<-matrix(horiz2.rep,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+      values.weighted<-vert2.values.int * weights
+      sum.values.weighted<-rowSums(values.weighted)
+
+      pred[imin:imax,l]<-sum.values.weighted/sum.weights
+      if(length(zz)>0) pred[imin-1+zz,l]<-NA # points with no grid2 neighbours within dmax are set to NA
+      rm(values.weighted,sum.values.weighted,vert2.values.int,horiz2,horiz2.rep); #gc()
+
+      print(paste("subgrid:",i,"/",n.int," layer:",l,"/",n.layers))
+    }
+  }
+
+  rm(zz,weights,sum.weights);gc()
+
+  interp<-list()
+  for(l in 1:n.layers) interp[[l]]<-matrix(pred[,l],nrow=n.lat1,ncol=n.lon1,byrow=TRUE)
+
+  return(interp)
+}
+
+################################################################################
+#                             Regression functions                             #
+################################################################################
+
+# Modified version of the lm.fit() base R function, to increase its speed by removing some unnecessary outputs.
+# From R version 2.15 onwards it can no longer be used; use lm.fit.fast instead.
+# Beware that with lm.fit you must also pass an initial column of ones to simulate the intercept term!!!
+lm.fit.fast.old<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+    z <- .Fortran("dqrls", qr = x, n = n, p = p, y = y, ny = ny,
+        tol = as.double(tol), coefficients = mat.or.vec(p, ny),
+        residuals = y, effects = y, rank = integer(1L), pivot = 1L:p,
+        qraux = double(p), work = double(2 * p), PACKAGE = "base")
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
+
+# Modified version of the lm.fit() base R function, to increase its speed by removing some unnecessary outputs (such as the QR decomposition).
+# remember that the first column of the x matrix must be a column of 1, to represent the constant term!
+# After R version 2.15, .Fortran() calls into base are forbidden and must be replaced with .Call(),
+# so instead of lm.fit.fast.old you must call lm.fit.fast:
+lm.fit.fast<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+
+    z <- .Call(stats:::C_Cdqrls, x, y, tol, TRUE)
+
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
+
+# Function to plot the bar chart of the anomalies of a time series of frequencies (by default in % but can be changed with the freq.max option),
+# using red colors for anomalies above the climatological value of the frequencies (the mean frequency over the study period)
+# and blue colors for anomalies below it. It also deals with any NA in the time series (the corresponding bar is not plotted).
+# it also adds the linear fit if it is found to be significant (with the test of Mann-Kendall). +barplot.freq <- function(time.serie, year.start, year.end, p.value = 0.05, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), ...) +{ + x <- time.serie + n.years <- length(x) + years.serie <- year.start:year.end + # m is the value used to separate positive anomalies from negative anomalies, i.e: the climatology over the whole period + m <- mean(x, na.rm=TRUE) + bar3 <- pmax(x - m, 0) + bar2 <- pmax(m - x, 0 ) + bar1 <- m - bar2 + bar1[is.na(bar1)] <- m # in case there are NA + bar2[is.na(bar2)] <- 0 # in case there are NA + bar3[is.na(bar3)] <- 0 # in case there are NA + + bar.matrix <- matrix(c(bar1,bar2,bar3), 3, n.years, byrow=T) + barplot(bar.matrix, col = c("white","blue","red"), border = NA, names.arg = years.serie, ylim = c(0,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp) + + abline(0,0, col="black") # add a black x-axis line + + # add mean frequency of the simulated ensemble mean time series: + # text(length(years.serie)/3, freq.max*(29/30),labels=bquote(bar(nu) == .(paste0(round(mean(x),1),"%"))),cex=3) + + #z <- lm(x ~ years) + kendall_pvalue <- MannKendall(x)$sl[1] + + # add a line with the linear trend only if it significant: + #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10") + if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10") +} + +## Alternative but without bars: +## library(lattice) # for xyplot +## library(grid) # for adding a different color for negative values in xyplot +## x<-1:100 +## y<-4+rnorm(100) +## x <- zoo(4+rnorm(100)) +## xyplot(x, grid=TRUE, panel = function(x, y, ...){ +## panel.xyplot(x, y, col="blue", ...) +## #panel.abline(h=0, col="black") +## panel.abline(h=4, col="gray") +## grid.clip(y=unit(4,"native"),just=c("bottom")) +## panel.xyplot(x, y, col="red", ...) +## }) + +# as barplot.freq, but for forecasts: +barplot.freq.sim <- function(time.serie, time.serie.max, time.serie.min, time.serie.obs, year.start, year.end, p.value = 0.05, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), col.bar = c("white","gray70"), col.line="gray50", cex.mean = 1, cex.obs = 1, cex.r = 1.5, ...) 
+{ + x <- time.serie + y <- time.serie.max + z <- time.serie.min + o <- time.serie.obs + + #n.years <- length(x) + years.serie <- year.start:year.end + # m is the value used to separate positive anomalies from negative anomalies, i.e: the climatology over the whole period + m <- mean(x, na.rm=TRUE) + + bar1 <- z + bar2 <- y + bar1[is.na(bar1)] <- m # in case there are NA + bar2[is.na(bar2)] <- 0 # in case there are NA + + bar.matrix <- matrix(c(bar1,bar2), 2, length(years.serie), byrow=T) + my.bar <- barplot(bar.matrix, col = col.bar, border = NA, names.arg = years.serie, ylim = c(0,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp) + + abline(m,0, col=col.line) # add a black x-axis line + + # add points with the ensemble mean: + col.sim <- rep("red",year.end-year.start+1) + col.sim[which(x < m)] <- "blue" + points(my.bar, x, type="p", pch=20, xlab="", ylab="", col=col.sim, cex=cex.mean) + + # add crosses with observed values: + col.obs <- rep("red",year.end-year.start+1) + col.obs[which(o < m)] <- "blue" + points(my.bar, o, type="p", pch=4, xlab="", ylab="", col=col.obs, cex=cex.obs, lwd=5) + + # add corr between obs.time series and ensemble mean time series: + corr <- round(cor(x,o, use="complete.obs"),2) + text(length(years.serie), freq.max*(19/20), labels=paste0("r= ",corr), cex=cex.r) + #text(c(length(years.serie-5),length(years.serie)),rep(freq.max*(9/10),2), labels=c(round(o,2),corr)) + + #z <- lm(x ~ years) + kendall_pvalue <- MannKendall(x)$sl[1] + + # add a line with the linear trend only if it significant: + #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10") + if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10") +} + +# as barplot.freq, but for forecasts: +barplot.freq.sim2 <- function(time.serie, time.serie.max, time.serie.min, time.serie.obs, year.start, year.end, p.value = 0.05, freq.min=0, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), col.bar = c("white","gray85","#4393c3","#d6604d","gray80"), col.line="gray50", cex.mean = 1, cex.obs = 1, cex.r = 1.5, ...) 
+{ + x <- time.serie + y <- time.serie.max + z <- time.serie.min + obs <- time.serie.obs + + #n.years <- length(x) + years.serie <- year.start:year.end + # m is the value used to separate positive anomalies from negative anomalies, i.e: the climatology over the whole period + m <- mean(x, na.rm=TRUE) + + bar1 <- z # white bar + bar2 <- pmin(x, m) - bar1 # gray bar + bar3 <- pmax(m - x, 0) # blue bar + bar4 <- pmax(x - m, 0) # red bar + bar5 <- y - (bar1+bar2+bar3+bar4) # gray bar + + bar1[is.na(bar1)] <- m # in case there are NA + bar2[is.na(bar2)] <- 0 # in case there are NA + bar3[is.na(bar3)] <- 0 # in case there are NA + bar4[is.na(bar4)] <- 0 # in case there are NA + bar5[is.na(bar5)] <- 0 # in case there are NA + + #bar3 <- pmax(x - m, 0) + #bar2 <- pmax(m - x, 0 ) + #bar1 <- m - bar2 + + bar.matrix <- matrix(c(bar1,bar2,bar3,bar4,bar5), 5, length(years.serie), byrow=T) + my.bar <- barplot(bar.matrix, col = col.bar, border = NA, names.arg = years.serie, ylim = c(freq.min,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp) + + abline(m,0, col=col.line) # add a black x-axis line + + # add points with the ensemble mean: + #col.sim <- rep("red",year.end-year.start+1) + #col.sim[which(x < m)] <- "blue" + #points(my.bar, x, type="p", pch=20, xlab="", ylab="", col=col.sim, cex=cex.mean) + + # add crosses with observed values: + col.obs <- rep("#67001f",year.end-year.start+1) + col.obs[which(obs < m)] <- "#053061" + points(my.bar, obs, type="p", pch=4, xlab="", ylab="", col=col.obs, cex=cex.obs, lwd=5) + + # add corr between obs.time series and ensemble mean time series: + #corr <- round(cor(x,obs, use="complete.obs"),2) + #text(length(years.serie), freq.max*(29/30), labels=paste0("r= ",corr), cex=cex.r) + + # add mean frequency of the simulated ensemble mean time series: + #text(length(years.serie)/3, freq.max*(29/30),labels=bquote(bar(nu) == .(paste0(round(mean(x),1),"%"))),cex=cex.r) + + #z <- lm(x ~ years) + kendall_pvalue <- MannKendall(x)$sl[1] + + # add a line with the linear trend only if it significant: + #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10") + if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10") +} + + +# As ColorBar, but the user MUST supply its own ticks and tick labels: +# (it is used to have a better control over the values shown) +# the option draw.ticks is used to remove the ticks lines +# the option label.dist is used to change the distance from the legend to the labels: +# Example: +# +# ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +# + +ColorBar2 <- function (brks, cols = NULL, vert = TRUE, cex = 1, draw.ticks = TRUE, label.dist = 1, my.ticks, my.labels) +{ + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- rainbow(nlev) + } + else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + if (vert) { + par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1, + 0), las = 1, cex = 1.2) + image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "") + box() + axis(4, at = my.ticks, tick = draw.ticks, labels = my.labels, cex.axis = cex, mgp=c(3,label.dist,0)) + } + else { + par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3, + 0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2) + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, + col = 
cols, xlab = "", ylab = "") + box() + axis(1, at = my.ticks, tick = draw.ticks, labels = my.labels, cex.axis = cex, mgp=c(3,label.dist,0)) + } +} + +# ColorBar3: like ColorBar, but the user can specify a subset of the predefined labels to be shown in the legend (more powerful than the resample option) +ColorBar3 <- function (brks, cols = NULL, vert = TRUE, cex = 1, subset = 1:length(brks)) +{ + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- rainbow(nlev) + } + else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + if (vert) { + par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1, + 0), las = 1, cex = 1.2) + image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "") + box() + axis(4, at = seq(0.5, length(brks) - 0.5, 1)[subset], + tick = TRUE, labels = brks[seq(1, length(brks), 1)][subset], + cex.axis = cex) + } + else { + par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3, + 0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2) + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "") + box() + axis(1, at = seq(0.5, length(brks) - 0.5, 1)[subset], + labels = brks[seq(1, length(brks), 1)][subset], + cex.axis = cex) + } +} + + +# Use the function below by handing it a matrix of numbers. It will plot the matrix with a color scale based on the highest and lowest values in the matrix. +# usage: myImagePlot(m) where m is a matrix of numbers +# optional arguments: myImagePlot(m, xlabels, ylabels, zlim, title=c("my title")) +# xLabels and yLabels are vectors of strings to label the rows and columns. +# zlim is a vector containing a low and high value to use for the color scale + +myImagePlot <- function(x, ...){ + min <- min(x) + max <- max(x) + yLabels <- rownames(x) + xLabels <- colnames(x) + title <-c() + # check for additional function arguments + if( length(list(...)) ){ + Lst <- list(...) 
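+    # each optional argument, when present, overrides the corresponding default set above: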
+ if( !is.null(Lst$zlim) ){ + min <- Lst$zlim[1] + max <- Lst$zlim[2] + } + if( !is.null(Lst$yLabels) ){ + yLabels <- c(Lst$yLabels) + } + if( !is.null(Lst$xLabels) ){ + xLabels <- c(Lst$xLabels) + } + if( !is.null(Lst$title) ){ + title <- Lst$title + } + } +# check for null values +if( is.null(xLabels) ){ + xLabels <- c(1:ncol(x)) +} +if( is.null(yLabels) ){ + yLabels <- c(1:nrow(x)) +} + +layout(matrix(data=c(1,2), nrow=1, ncol=2), widths=c(4,1), heights=c(1,1)) + + # Red and green range from 0 to 1 while Blue ranges from 1 to 0 + ColorRamp <- rgb( seq(0,1,length=256), # Red + seq(0,1,length=256), # Green + seq(1,0,length=256)) # Blue + ColorLevels <- seq(min, max, length=length(ColorRamp)) + + # Reverse Y axis + reverse <- nrow(x) : 1 + yLabels <- yLabels[reverse] + x <- x[reverse,] + + # Data Map + par(mar = c(3,5,2.5,2)) + image(1:length(xLabels), 1:length(yLabels), t(x), col=ColorRamp, xlab="", + ylab="", axes=FALSE, zlim=c(min,max)) + if( !is.null(title) ){ + title(main=title) + } +axis(BELOW<-1, at=1:length(xLabels), labels=xLabels, cex.axis=0.7) + axis(LEFT <-2, at=1:length(yLabels), labels=yLabels, las= HORIZONTAL<-1, + cex.axis=0.7) + + # Color Scale + par(mar = c(3,2.5,2.5,2)) + image(1, ColorLevels, + matrix(data=ColorLevels, ncol=length(ColorLevels),nrow=1), + col=ColorRamp, + xlab="",ylab="", + xaxt="n") + + layout(1) +} + +################################################################################ +# Big data functions # +################################################################################ + +################################# split.array ################################# + +# function to split an array in smaller arrays, selecting one dimension of the array as the one used to split the array. +# The function only returns the intervals of each subarray, not the values of the subarrays. +# the subarrays are then used inside a for loop in the main script do do all the required analysis instead of applying them to the whole array. +# +# The array can be for instance an hindcast array with dimensions: c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) +# +# Example: +# +# hindcasts <- array(rnorm(4*100*4*256*512), c(4,100,4,256,512)) +# block <- split.array.old(hindcasts, 5) # split on longitude +# +# for(subArray in 1:block$n.sub){ # loop on each smaller subarray +# subLon=block$my.interv[[subArray]] # longitude interval corresponding to the subarray data +# nlon<-length(subLon) # length of the actual subarray +# +# hindcast.sub <- hindcasts[,,,,subLon] # subarray data +# # now insert below any calculation you need to do on the hindcast.sub array +# +# } # close for on subArray + +split.array.old <- function (array, along=tail(dim(array),1), max.n.el=10000000){ + array.dims <- dim(array) # i.e: [4,100,4,320,640] + n.sub <- prod(array.dims)/max.n.el # number of sub-arrays in which to split the hindcast and rean.data, i.e: 4*100*4*320*640/10000000 = 32.768 + n.sub <- ceiling(n.sub) # round n.sub to the nearest higher integer, i.e: 33 + n.split <- array.dims[along] # number of elements along the splitting dimension, i.e: 640 if along=5 + sub.size <- floor(n.split/n.sub) # number of elements in the splitted dimension (i.e: lon) of each subarray rounded to the lower integer, i.e: 640/33=19.39=19 + if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el") + n.sub <- n.split %/% sub.size # take only the integer part of the ratio. It is equal to floor(n.split/sub.size). 
I.e: floor(640/19) = 33
+  add.last <- n.split %% n.sub # number of additional elements of the last subarray (if %% > 0), i.e: 640 %% 33 = 13
+
+  my.interv<-list()
+  for(s in 1:n.sub){
+    if(s==n.sub) {mod.last <- add.last} else {mod.last <- 0}
+    my.interv[[s]] <- (1 + sub.size*(s-1)):((sub.size*s) + mod.last)
+  }
+
+  return(list(array.dims=array.dims, along=along, n.split=n.split, n.sub=n.sub, sub.size=sub.size, add.last=add.last, sub.size.last=sub.size + mod.last, my.interv=my.interv))
+}
+
+# Function to split an array into smaller arrays (chunks)
+#
+# in the argument 'dimensions', you specify the dimensions of the array to split [i.e: c(4,100,4,256,512) for a hindcast array]
+# in the argument 'along', you specify the index of the dimension you want to use to split the array. By default, it is the last dimension of the array.
+# in the argument 'max.n.el', you set the size of a chunk. By default, split.array creates chunks of about 10,000,000 elements. For arrays of type numeric (double),
+# this is equivalent to 80 MB, because each double occupies 8 bytes.
+# in the argument 'chunks', you specify exactly how many chunks you want to be created. It overrides the option 'max.n.el'. Useful if you already know
+# (more or less) how many chunks you need and want to run each chunk on a different core at the same time, so you can't exceed the maximum number of cores available on your machine.
+# in the argument 'smallest', you specify if you want to run the script with the maximum possible number of chunks (smallest=TRUE),
+# so that the number of chunks is equal to the number of possible values of the 'along' dimension and the size of each chunk is exactly 1.
+
+split.array <- function (dimensions, along=length(dimensions), max.n.el=10000000, chunks=NULL, smallest=FALSE){
+  if(along <= 0 || along > length(dimensions)) stop("Choose a number for the 'along' argument inside the number of available dimensions!")
+
+  array.dims <- dimensions # i.e: [4,100,4,320,640]
+  n.split <- array.dims[along] # number of elements along the splitting dimension, i.e: 640 if along=5
+
+  n.chunk <- prod(array.dims)/max.n.el # first estimate of the number of chunks (sub-arrays) into which the array will be split, i.e: 4*100*4*320*640/10000000 = 32.768
+  n.chunk <- ceiling(n.chunk) # round n.chunk to the nearest higher integer, i.e: 33
+  chunk.size <- floor(n.split/n.chunk) # number of elements in the split dimension (i.e: lon) of each chunk, rounded to the lower integer, i.e: floor(640/33) = 19
+  if(chunk.size <= 1) stop("Chunks too small. Try increasing 10 times the value of max.n.el")
+
+  n.chunk <- n.split %/% chunk.size # take only the integer part of the ratio, equal to floor(n.split/chunk.size),
i.e: floor(640/19) = 33
+  if(!is.null(chunks)) {n.chunk <- chunks; chunk.size <- floor(n.split/n.chunk)}
+
+  add.last <- n.split %% n.chunk # number of additional elements of the last chunk (if %% > 0), i.e: 640 %% 33 = 13
+
+  if(smallest == TRUE){ # in this case, we want each chunk to be exactly 1 row or column wide:
+    n.chunk <- array.dims[along]
+    n.split <- n.chunk
+    chunk.size <- 1
+    add.last <- 0
+  }
+
+  # list with the position of each chunk c inside the chosen dimension of the array:
+  int<-list()
+  for(c in 1:n.chunk){
+    if(c == n.chunk) {mod.last <- add.last} else {mod.last <- 0}
+    int[[c]] <- (1 + chunk.size*(c-1)):((chunk.size*c) + mod.last)
+  }
+
+  chunk.size.last <- chunk.size + mod.last # number of elements of the last chunk
+
+  # list with the number of elements of each chunk c:
+  n.int<-list()
+  for(c in 1:n.chunk) n.int[[c]] <- length(int[[c]])
+
+  return(list(n.int=n.int, int=int, array.dims=array.dims, along=along, max.n.el=max.n.el, n.split=n.split,
+              n.chunk=n.chunk, chunk.size=chunk.size, add.last=add.last, chunk.size.last=chunk.size.last))
+}
+
+
+# Example:
+#
+# hindcasts <- array(rnorm(4*100*4*256*512), c(4,100,4,256,512))
+#
+# chunk <- split.array(dim(hindcasts), 5) # split on longitude
+#
+# for(subArray in 1:chunk$n.chunk){ # loop on each smaller subarray
+#    subLon=chunk$int[[subArray]] # longitude interval corresponding to the subarray data
+#    nlon<-length(subLon) # length of the actual subarray
+#
+#    hindcast.sub <- hindcasts[,,,,subLon] # subarray data
+#    # now insert below any calculation you need to do on the hindcast.sub array
+#
+# } # close for on subArray
+
+
+#################### veriApplyBig ############################################
+
+
+# A wrapper of veriApply() to be able to work even with hindcast arrays that exceed the memory limits of the workstation
+# and to efficiently use the option 'parallel=TRUE' also for large data arrays without memory limits.
+# (only 2-5 GB of RAM are necessary, depending on the number of cpus used)
+#
+# It splits the input hindcast data and the input observed data into smaller arrays based on the longitude value,
+# and then applies veriApply() to each sub-array, assembling the results in an array with the same format as the array returned by veriApply().
+# A progress counter shows how many sub-arrays have already been processed.
+#
+# To take advantage of this function, the option parallel=TRUE is enabled by default,
+# and the option 'ncpus' is set to 8, to work with BSC workstations with 4 physical cores (8 with hyperthreading).
+# Users with IC3 Intel Xeon workstations with 8 physical cores should set ncpus = 16 to take advantage of hyperthreading.
+# With ncpus=8, calculations are 4 times faster, while with ncpus=16, calculations are 8 times faster.
+#
+# On Moore, you can set ncpus=8, and on Amdahl, ncpus can be set to a maximum of 12; however, it is not recommended to run this function at full power on Moore or Amdahl,
+# since it would consume all the cores available (8 for Moore and 12 for Amdahl), preventing other users from employing the cluster for their single-core calculations.
+# You can run it during the weekend if there are no other jobs scheduled, or set a lower number of ncpus (4-5) to leave resources for other users, even if performance will suffer.
+#
+# Parallel computation employs all the CPU resources of the machine: it will go slower until the computation is finished.
+# If you want to use the machine also for other tasks, you can set a lower number of ncpus (2 or 3), with a loss of performance.
+#
+# If you get a memory problem error, try decreasing the size of the variable 'max.n.el'.
+#
+# Example of use:
+#
+# library(ff)
+# bigfile <- "/scratch/Earth/ncortesi/bigfile" # choose where to store the big data array
+# source('/scratch/Earth/ncortesi/RESILIENCE/veriApplyBig.R') # load the veriApplyBig() and save.big() functions
+#
+# # create a random hindcast that normally wouldn't fit into memory:
+# anom.hind.dim<-c(51,30,1,256,512)
+# anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)
+#
+# save.big(array=anom.hind, path=bigfile)
+#
+# # create a random observed hindcast that fits into memory (because observations only have 1 member):
+# anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])
+#
+# ffload(file=bigfile)
+# # str(ff.array)
+# open(ff.array)
+#
+# my.score <- veriApplyBig("FairRpss",fcst=ff.array, obs=anom.rean, tdim=2, ensdim=1 , prob=c(1/3, 1/3)) # by default parallel=TRUE and with ncpus=8 to run it on our PC
+#
+# close(ff.array)
+
+veriApplyBig <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) -
+    1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+    na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=5000000, path=NULL, ...)
+{
+
+  sub <- prod(dim(fcst))/max.n.el
+  sub <- ceiling(sub)  # number of sub-arrays in which to split the hindcast and rean.data
+  n.lon <- tail(dim(fcst),1) # number of longitude elements
+  sub.size <- floor(n.lon/sub) # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(fcst),3)) # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # because the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # longitude interval where to load data
+
+    #anom.hindcast.sub <- array(NA, c(head(dim(fcst),4), sub.size-last))
+    #anom.rean.sub <- array(NA, c(dim(fcst)[2:4], sub.size-last))
+
+    anom.hindcast.sub <- fcst[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  cat('\n')
+  return(my.SkillScore)
+
+}
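+
+
+# Older variant of veriApplyBig(), kept for reference: instead of receiving the hindcast array
+# directly, it loads it from an .ffData file previously written with save.big(), so here 'fcst'
+# is the path of that file (the loaded object is named 'ff.array'):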
+old_veriApplyBig <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) -
+    1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+    na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=10000000, path=NULL, ...)
+{
+
+  ffload(file=fcst)
+  #str(ff.array)
+  open(ff.array)
+
+  sub <- prod(dim(ff.array))/max.n.el
+  sub <- ceiling(sub)  # number of sub-arrays in which to split the hindcast and rean.data
+  n.lon <- tail(dim(ff.array),1) # number of longitude elements
+  sub.size <- floor(n.lon/sub) # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(ff.array),3)) # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # because the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # longitude interval where to load data
+
+    #anom.hindcast.sub <- array(NA, c(head(dim(ff.array),4), sub.size-last))
+    #anom.rean.sub <- array(NA, c(dim(ff.array)[2:4], sub.size-last))
+
+    anom.hindcast.sub <- ff.array[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  close(ff.array)
+
+  cat('\n')
+  return(my.SkillScore)
+
+}
+
+
+################################################################################
+#                                   save.big                                   #
+
+
+# A wrapper of ffsave to save on disk big arrays of a numeric (double) variable
+# in format .ffData (by means of the ff package). See veriApplyBig() for an example.
+
+save.big <- function(array, path) {
+  ff.array <- as.ff(array, vmode="double", file = path)
+  ffsave(ff.array, file= path)
+  close(ff.array); rm(ff.array)
+}
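+
+# Example of use (an illustrative sketch; the path is a placeholder):
+#
+# big.array <- array(rnorm(51*30*1*64*128), c(51,30,1,64,128))
+# save.big(array=big.array, path="/scratch/Earth/ncortesi/bigfile_test")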
+#
+# Example of use:
+#
+# my.score <- veriApplyPar("FairRpss", fcst=anom.hindcast, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3, 1/3)) # by default parallel=TRUE and ncpus=8, to run it on our pc
+#
+# my.score <- veriApplyPar("FairCrpss", fcst=anom.hindcast, obs=anom.rean, tdim=2, ensdim=1, ncpus=2) # to set a lower number of cpus
+#
+
+veriApplyPar <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) - 1,
+    ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+    na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el = 10000000, ...)
+{
+  sub <- prod(dim(fcst))/max.n.el
+  sub <- ceiling(sub)          # number of sub-arrays in which to split the hindcast and the rean. data
+  n.lon <- tail(dim(fcst),1)   # number of longitude elements
+  sub.size <- floor(n.lon/sub) # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub  # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(fcst),3)) # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # because the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # longitude interval where to load data
+
+    anom.hindcast.sub <- fcst[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  cat('\n')
+  return(my.SkillScore)
+
+}
+
+
+#######################################################################################
+#                                  parApplyCal                                        #
+
+
+# Like parApply(), but with a check to authorize the parallel computation or not:
+# if a FORK cluster cannot be created, it falls back to a serial apply().
+# As with parApply(), it must be applied only to arrays small enough to fit in memory.
+# If your array is too big, consider splitting it into smaller arrays with
+# the function split.array.
+
+parApplyCal <- function(cl = NULL, X, MARGIN, FUN, ncpus = 4, ... ) {
+
+  .cl <- try(parallel::makeCluster(ncpus, type = "FORK"), silent = TRUE)
+
+  hasparallel <- !("try-error" %in% class(.cl)) # FALSE if the cluster could not be created
+
+  if (hasparallel) {
+    on.exit(parallel::stopCluster(.cl))
+
+    output <- parallel::parApply(cl = .cl, X = X, MARGIN = MARGIN, FUN = FUN, ...)
+
+  } else {
+    output <- apply(X = X, MARGIN = MARGIN, FUN = FUN, ...)
+  }
+
+  return(output)
+
+}
+
+#######################################################################################
+#                                old_parApplyBig                                      #
+
+
+# parallel::parApply() needs too much memory when the input array is too big (>400-500 MB on 8GB machines).
+
+# 'splitdim' is the dimension that will be split internally; it must be one of the dimensions used also by MARGIN (see examples below).
+
+old_parApplyBig <- function(cl = NULL, X, MARGIN, FUN, splitdim = tail(dim(X),1), ncpus = 4, max.n.el = 10000000, ...)
+{
+  sub <- prod(dim(X))/max.n.el
+  sub <- ceiling(sub)          # number of sub-arrays in which to split the input array
+  n.el <- dim(X)[splitdim]     # number of elements in the splitdim dimension
+  sub.size <- floor(n.el/sub)  # number of elements in the splitting dimension of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.el %% sub   # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+  n.dim.X <- length(dim(X))    # number of dimensions of array X
+
+  # swap the splitdim dimension with the last one:
+  my.seq <- 1:n.dim.X
+  my.seq[n.dim.X] <- splitdim
+  my.seq[splitdim] <- n.dim.X
+  if(splitdim < n.dim.X) X <- aperm(X, my.seq)
+  if(splitdim < n.dim.X) MARGIN <- my.seq[MARGIN] # remap MARGIN to the permuted dimension order
+
+  output<-array(NA,dim(X)[MARGIN]) # take only the dimensions used by MARGIN
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # because the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # interval of the splitting dimension where to load data
+
+    if(n.dim.X == 2) subarray <- X[,my.interv]
+    if(n.dim.X == 3) subarray <- X[,,my.interv]
+    if(n.dim.X == 4) subarray <- X[,,,my.interv]
+    if(n.dim.X == 5) subarray <- X[,,,,my.interv]
+    if(n.dim.X == 6) subarray <- X[,,,,,my.interv]
+    if(n.dim.X == 7) subarray <- X[,,,,,,my.interv]
+    if(n.dim.X == 8) subarray <- X[,,,,,,,my.interv]
+    if(n.dim.X == 9) subarray <- X[,,,,,,,,my.interv]
+    if(n.dim.X == 10) subarray <- X[,,,,,,,,,my.interv]
+    if(n.dim.X > 10) stop("input array has too many dimensions")
+
+    .cl <- try(parallel::makeCluster(ncpus, type = "FORK"), silent = TRUE)
+
+    hasparallel <- !("try-error" %in% class(.cl)) # FALSE if the cluster could not be created
+
+    if (hasparallel) {
+      suboutput <- parallel::parApply(cl = .cl, X = subarray, MARGIN = MARGIN, FUN = FUN, ...)
+      parallel::stopCluster(.cl)
+    }
+    else {
+      suboutput <- apply(X = subarray, MARGIN = MARGIN, FUN = FUN, ...)
+    }
+
+    if(length(MARGIN) == 1) output[my.interv] <- suboutput
+    if(length(MARGIN) == 2) output[,my.interv] <- suboutput
+    if(length(MARGIN) == 3) output[,,my.interv] <- suboutput
+    if(length(MARGIN) == 4) output[,,,my.interv] <- suboutput
+    if(length(MARGIN) == 5) output[,,,,my.interv] <- suboutput
+    if(length(MARGIN) == 6) output[,,,,,my.interv] <- suboutput
+
+    gc()
+  }
+
+  if(splitdim < n.dim.X) X <- aperm(X, my.seq) # restore the original dimension order of X
+
+  cat('\n')
+  return(output)
+
+}
+
+
+
+#################################################################################################
+#                                       Wine indexes                                            #
+#################################################################################################
+
+# 1. Annual mean temperature
+# temp must have the format [month,lat,lon] or [month,lon,lat], and num.months must be a multiple of 12.
+# num.months can be smaller than the number of months in temp: it defines the number of months used to
+# calculate the index, starting from the first month in the temp array. It must be a multiple of 12 so
+# that only whole years enter the average; e.g. num.months=24 averages the first 24 months of the temp array.
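+#
+# Minimal example with random data (an assumption for illustration: 2 years of monthly mean
+# temperatures on a 3x4 grid):
+#
+# temp <- array(rnorm(24*3*4, mean = 15), c(24,3,4))
+# index1(temp, 24)   # 3x4 matrix with the 24-month mean temperature of each grid point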
+index1<-function(temp,num.months){
+  my.temp<-temp[1:num.months,,] # select only the months actually used
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # if subsetting dropped the dimensions (single grid point), convert back to an array before applying apply
+  my.index<-apply(my.temp,c(2,3),mean) # calculate the mean temperature
+  return(my.index)
+}
+
+# 2. Mean temperature of the growing season, for the WHOLE period
+# (average of the monthly mean temperatures for the period October to April)
+# You must provide at least 24 months of data starting from January to compute the index,
+# because the first 4 months and the last 3 cannot be used for the calculation.
+# If you want to calculate the yearly value for year XXXX, just introduce its 12 months plus the 12 months of the following year.
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index2<-function(temp,num.months){
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)] # remove the first four months (Jan-Apr of the first year), because they lack the preceding Oct-Dec months
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # remove the last three months (Oct-Dec of the last year), because they lack the following Jan-Apr months
+
+  temp.oct.apr<-temp[my.months,,] # select only the months from October to April
+  if(!is.array(temp.oct.apr)) temp.oct.apr<-array(temp.oct.apr,c(length(temp.oct.apr),1,1)) # if subsetting dropped the dimensions, convert back to an array
+
+  my.index<-apply(temp.oct.apr,c(2,3),mean) # calculate the mean temperature
+  return(my.index)
+}
+
+# 3. Winkler index for the vegetative period
+# temp data must start from January; num.months must be at least 24 months long and a multiple of 12 months
+# temp must have the format [month,lat,lon] or [month,lon,lat]
+index3<-function(temp,num.months){
+  n.years=num.months/12
+
+  temp.minus.ten<-temp-10 # subtract 10 degrees from each monthly mean
+  ss<-which(temp.minus.ten<0,arr.ind=T) # select the elements < 0 degrees
+  temp.minus.ten[ss]=0 # set to 0 the elements lower than 0 degrees
+
+  month31=c(1,3,5,7,8,10,12) # months with 31 days
+  my.months31<-rep(month31,n.years)+rep(0:(n.years-1)*12,each=length(month31)) # select only the months with 31 days
+  temp.minus.ten[my.months31,,]<-temp.minus.ten[my.months31,,]*31 # multiply the temperature of these months by 31
+
+  month30=c(4,6,9,11) # months with 30 days
+  my.months30<-rep(month30,n.years)+rep(0:(n.years-1)*12,each=length(month30)) # select only the months with 30 days
+  temp.minus.ten[my.months30,,]<-temp.minus.ten[my.months30,,]*30 # multiply the temperature of these months by 30
+
+  month28=2 # month with 28 days
+  my.months28<-rep(month28,n.years)+rep(0:(n.years-1)*12,each=length(month28)) # select only the month with 28 days
+  temp.minus.ten[my.months28,,]<-temp.minus.ten[my.months28,,]*28.25 # multiply the temperature of February by 28.25 (its average length)
+
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)] # remove the first four months (Jan-Apr of the first year), because they lack the preceding Oct-Dec months
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # remove the last three months (Oct-Dec of the last year), because they lack the following Jan-Apr months
+
+  temp.oct.apr<-temp.minus.ten[my.months,,] # select only the months from October to April
+  if(!is.array(temp.oct.apr)) temp.oct.apr<-array(temp.oct.apr,c(length(temp.oct.apr),1,1)) # if subsetting dropped the dimensions, convert back to an array before applying apply
+  my.index<-apply(temp.oct.apr,c(2,3),sum) # calculate the sum of the (approximately) daily degrees above 10 degrees
+  return(my.index/(n.years-1)) # normalize by the number of years used, taking into account that one vegetative period is always lost
+}
+
+# 4. Winter Severity Index (old version, as the absolute minimum of the mean temperature)
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index4_old<-function(temp,num.months){
+  my.temp<-temp[1:num.months,,] # select only the months actually used
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # if subsetting dropped the dimensions, convert back to an array before applying apply
+  my.index<-apply(my.temp,c(2,3),min)
+  return(my.index)
+}
+
+
+# 4. Winter Severity Index (mean temperature of the coldest month)
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index4<-function(temp,num.months){
+  n.years=num.months/12
+
+  month.coldest=7 # coldest month in the southern hemisphere (July)
+  my.months.coldest<-rep(month.coldest,n.years)+rep(0:(n.years-1)*12,each=1) # select the coldest month of each year
+  my.temp<-temp[1:num.months,,] # select only the months in the chosen period
+
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # if subsetting dropped the dimensions, convert back to an array before applying apply
+  my.temp<-my.temp[my.months.coldest,,] # select only the coldest months
+  if(is.null(dim(my.temp))) {my.index<-my.temp} else {my.index<-apply(my.temp,c(2,3),mean)}
+  return(my.index)
+}
+
+
+
+# 5. Annual precipitation
+# prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index5<-function(prec,num.months){
+  my.prec<-prec[1:num.months,,] # select only the months actually used
+  if(!is.array(my.prec)) my.prec<-array(my.prec,c(length(my.prec),1,1)) # if subsetting dropped the dimensions, convert back to an array before applying apply
+  my.index<-apply(my.prec,c(2,3),sum)
+  n.years=num.months/12
+  return(my.index/n.years) # normalize by the number of years, to return the mean annual value of the index
+}
+
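+#
+# Minimal example with random data (an assumption for illustration: 3 years of monthly
+# precipitation on a 2x2 grid):
+#
+# prec <- array(runif(36*2*2, 0, 100), c(36,2,2))
+# index5(prec, 36)   # 2x2 matrix with the mean annual precipitation of each grid point
+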
+# 6. Precipitation during the vegetative cycle (October to April)
+# prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12, with at least 24 months
+index6<-function(prec,num.months){
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)] # remove the first four months, because they belong to the vegetative cycle of the previous year
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # remove the last three months of the last year
+
+  prec.oct.apr<-prec[my.months,,] # select only the months from October to April
+  if(!is.array(prec.oct.apr)) prec.oct.apr<-array(prec.oct.apr,c(length(prec.oct.apr),1,1)) # if subsetting dropped the dimensions, convert back to an array before applying apply
+  my.index<-apply(prec.oct.apr,c(2,3),sum) # calculate the total precipitation
+  n.years=num.months/12
+
+  return(my.index/(n.years-1)) # normalize by the number of vegetative periods introduced, to return the mean annual value of the index
+}
+
+# function to select one of the above six indices:
+# temp and prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+choose.index<-function(num.index,temp,prec,num.months){
+  if(num.index==1) return(index1(temp,num.months))
+  if(num.index==2) return(index2(temp,num.months))
+  if(num.index==3) return(index3(temp,num.months))
+  if(num.index==4) return(index4(temp,num.months))
+  if(num.index==5) return(index5(prec,num.months))
+  if(num.index==6) return(index6(prec,num.months))
+}
+
+# same function as above, but returns all six indices:
+indices<-function(temp,prec,num.months){
+  return(list(TempMediaAnual=index1(temp,num.months),
+              TempMediaVeget=index2(temp,num.months),
+              IndiceWinklerVeg=index3(temp,num.months),
+              WinterSeverityIndex=index4(temp,num.months),
+              PrecAnual=index5(prec,num.months),
+              PrecVeget=index6(prec,num.months)))
+}
+
+
+#################################################################################################
+#                                      Error indexes                                            #
+#################################################################################################
+
+
+RMSE<-function(obs,pred){ # simple function to compute the root mean square error, given the vector with the observed values and the one with the predictions (there can also be NAs in the obs vector)
+  # the predictions always contain all the annual predicted values, while there may be years without observed values, so we only have to remove the rows corresponding to the years with NA among the obs values:
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  RMSE<-(sum.scarti.quad/(length(years.right)))^0.5 # remember to remove the years with NA from the denominator!
+  return(RMSE)
+}
+
+MAE<-function(obs,pred){ # simple function to compute the Mean Absolute Error, given the vector with the observed values and the one with the predictions (there can also be NAs in the obs vector)
+  # the predictions always contain all the annual predicted values, while there may be years without observed values, so we only have to remove the rows corresponding to the years with NA among the obs values:
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  MAE<-sum(abs(obs2-pred2))/(length(years.right)) # remember to remove the years with NA from the denominator!
+  return(MAE)
+}
+
+MAEp<-function(obs,pred){ # simple function to compute the Mean Absolute Error as a percentage (%), given the vector with the observed values and the one with the predictions (there can also be NAs in the obs vector)
+  # the predictions always contain all the annual predicted values, while there may be years without observed values, so we only have to remove the rows corresponding to the years with NA among the obs values:
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  MAE<-sum(abs(obs2-pred2))/(length(years.right)) # remember to remove the years with NA from the denominator!
+  obs.prom<-sum(obs2)/length(years.right)
+  MAE<-MAE/obs.prom
+  return(MAE)
+}
+
+MBE<-function(obs,pred){ # simple function to compute the mean error (Mean Bias Error) (there can also be elements with NA in the obs vector)
+  # the predictions always contain all the annual predicted values, while there may be years without observed values, so we have to remove the rows corresponding to the years with NA among the obs values:
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  MBE<-sum(pred2-obs2)/(length(years.right))
+  return(MBE)
+}
+
+AGREE<-function(obs,pred){ # to compute Willmott's d, or Index of Agreement. BEWARE: it is almost as insensitive to over/underestimation as the R2
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  obs2.mean<-mean(obs2)
+  d<-1-(sum.scarti.quad/(sum((abs(pred2-obs2.mean)+abs(obs2-obs2.mean))^2)))
+  return(d)
+}
+
+AGREE.1<-function(obs,pred){ # Willmott's d corrected, without the squares
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+  obs2.mean<-mean(obs2)
+  d1<-1-(sum.scarti/(sum(abs(pred2-obs2.mean)+abs(obs2-obs2.mean))))
+  return(d1)
+}
+
+AGREE.2011<-function(obs,pred){ # the new Willmott's d, introduced by him in 2011
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+  obs2.mean<-mean(obs2)
+  denom<-2*sum(abs(obs2-obs2.mean)) # denominator of the d(r) formula
+  if(sum.scarti<=denom){d.2011<-1-(sum.scarti/denom)}else{d.2011<-(denom/sum.scarti)-1}
+  return(d.2011)
+}
+
+RMSE.freedom<-function(obs,pred,degree.freedom){ # function to compute the root mean square error with a given number of degrees of freedom (there can also be NAs in the obs vector)
+  # the predictions always contain all the annual predicted values, while there may be years without observed values, so we have to remove the rows corresponding to the years with NA among the obs values:
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  RMSE<-(sum.scarti.quad/degree.freedom)^0.5
+  return(RMSE)
+}
+
+SumSquared<-function(obs,pred){ # simple function to compute the sum of the squared errors, given the vector with the observed values and the one with the predictions (there can also be NAs in the obs vector)
+  # the predictions always contain all the annual predicted values, while there may be years without observed values, so we have to remove the rows corresponding to the years with NA among the obs values:
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  return(sum.scarti.quad)
+}
+
+
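+# Example of use of the error indexes (a minimal sketch with toy data, where the third and
+# fifth observations are missing):
+#
+# obs  <- c(10, 12, NA, 14, NA, 15)
+# pred <- c(11, 11, 13, 13, 12, 16)
+# RMSE(obs, pred)   # 1: only the 4 years with valid observations enter the denominator
+# MAE(obs, pred)    # 1
+# MBE(obs, pred)    # 0
+
+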
+################################################################################################# +# Others # +################################################################################################# + + + +myboxplot.stats <- function (x, coef = NULL, do.conf = TRUE, do.out =TRUE) +{ + nna <- !is.na(x) + n <- sum(nna) + stats <- quantile(x, c(.05,.25,.5,.75,.95), na.rm = TRUE) + iqr <- diff(stats[c(2, 4)]) + out <- x < stats[1] | x > stats[5] + conf <- if (do.conf) stats[3] + c(-1.58, 1.58) * diff(stats[c(2, 4)])/sqrt(n) + list(stats = stats, n = n, conf = conf, out = x[out & nna]) +} + +Load2 <- function (var, exp = NULL, obs = NULL, sdates, nmember = NULL, + nmemberobs = NULL, nleadtime = NULL, leadtimemin = 1, leadtimemax = NULL, + storefreq = "monthly", sampleperiod = 1, lonmin = 0, lonmax = 360, + latmin = -90, latmax = 90, output = "areave", method = "conservative", + grid = NULL, maskmod = vector("list", 15), maskobs = vector("list", + 15), configfile = NULL, varmin = NULL, varmax = NULL, + silent = FALSE, nprocs = NULL, dimnames = NULL, remapcells = 2) +{ + parameter_names <- ls() + if (length(parameter_names) < 3 || is.null(var) || is.null(sdates) || + (is.null(exp) && is.null(obs))) { + stop("Error: At least 'var', 'exp'/'obs' and 'sdates' must be provided.") + } + load_parameters <- lapply(parameter_names, get, envir = environment()) + names(load_parameters) <- parameter_names + parameters_to_show <- c("var", "exp", "obs", "sdates", "grid", + "output", "storefreq") + load_parameters <- c(load_parameters[parameters_to_show], + load_parameters[-match(parameters_to_show, names(load_parameters))]) + cat(paste("* The load call you issued is:\n* Load(", paste(strwrap(paste(unlist(lapply(names(load_parameters[1:length(parameters_to_show)]), + function(x) paste(x, "=", if (x == "sdates" && length(load_parameters[[x]]) > + 4) { + paste0("c('", load_parameters[[x]][1], "', '", load_parameters[[x]][2], + "', ..., '", tail(load_parameters[[x]], 1), "')") + } + else { + paste(deparse(load_parameters[[x]]), collapse = "") + }))), collapse = ", "), width = getOption("width") - + 9, indent = 0, exdent = 8), collapse = "\n*"), ", ...)\n* See the full call in '$load_parameters' after Load() finishes.\n", + sep = "")) + errors <- try({ + if (is.null(var) || !(is.character(var) && nchar(var) > + 0)) { + stop("Error: parameter 'var' should be a character string of length >= 1.") + } + exps_to_fetch <- c() + exp_info_names <- c("name", "path", "nc_var_name", "suffix", + "var_min", "var_max", "dimnames") + if (!is.null(exp) && !(is.character(exp) && all(nchar(exp) > + 0)) && !is.list(exp)) { + stop("Error: parameter 'exp' should be a vector of strings or a list with information of the experimental datasets to load. Check 'exp' in ?Load for details.") + } + else if (!is.null(exp)) { + if (!is.list(exp)) { + exp <- lapply(exp, function(x) list(name = x)) + } + for (i in 1:length(exp)) { + if (!is.list(exp[[i]])) { + stop("Error: parameter 'exp' is incorrect. It should be a list of lists.") + } + if (!(all(names(exp[[i]]) %in% exp_info_names))) { + stop("Error: parameter 'exp' is incorrect. There are unrecognized components in the information of some of the experiments. Check 'exp' in ?Load for details.") + } + if (!("name" %in% names(exp[[i]]))) { + exp[[i]][["name"]] <- paste0("exp", i) + if (!("path" %in% names(exp[[i]]))) { + stop("Error: parameter 'exp' is incorrect. A 'path' should be provided for each experimental dataset if no 'name' is provided. 
See 'exp' in ?Load for details.") + } + } + else if (!("path" %in% names(exp[[i]]))) { + exps_to_fetch <- c(exps_to_fetch, i) + } + if ("path" %in% names(exp[[i]])) { + if (!("nc_var_name" %in% names(exp[[i]]))) { + exp[[i]][["nc_var_name"]] <- "$VAR_NAME$" + } + if (!("suffix" %in% names(exp[[i]]))) { + exp[[i]][["suffix"]] <- "" + } + if (!("var_min" %in% names(exp[[i]]))) { + exp[[i]][["var_min"]] <- "" + } + if (!("var_max" %in% names(exp[[i]]))) { + exp[[i]][["var_max"]] <- "" + } + } + } + if ((length(exps_to_fetch) > 0) && (length(exps_to_fetch) < + length(exp))) { + cat("! Warning: 'path' was provided for some experimental datasets in 'exp'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + obs_to_fetch <- c() + obs_info_names <- c("name", "path", "nc_var_name", "suffix", + "var_min", "var_max") + if (!is.null(obs) && !(is.character(obs) && all(nchar(obs) > + 0)) && !is.list(obs)) { + stop("Error: parameter 'obs' should be a vector of strings or a list with information of the observational datasets to load. Check 'obs' in ?Load for details.") + } + else if (!is.null(obs)) { + if (!is.list(obs)) { + obs <- lapply(obs, function(x) list(name = x)) + } + for (i in 1:length(obs)) { + if (!is.list(obs[[i]])) { + stop("Error: parameter 'obs' is incorrect. It should be a list of lists.") + } + if (!(all(names(obs[[i]]) %in% obs_info_names))) { + stop("Error: parameter 'obs' is incorrect. There are unrecognized components in the information of some of the observations. Check 'obs' in ?Load for details.") + } + if (!("name" %in% names(obs[[i]]))) { + obs[[i]][["name"]] <- paste0("obs", i) + if (!("path" %in% names(obs[[i]]))) { + stop("Error: parameter 'obs' is incorrect. A 'path' should be provided for each observational dataset if no 'name' is provided. See 'obs' in ?Load for details.") + } + } + else if (!("path" %in% names(obs[[i]]))) { + obs_to_fetch <- c(obs_to_fetch, i) + } + if ("path" %in% names(obs[[i]])) { + if (!("nc_var_name" %in% names(obs[[i]]))) { + obs[[i]][["nc_var_name"]] <- "$VAR_NAME$" + } + if (!("suffix" %in% names(obs[[i]]))) { + obs[[i]][["suffix"]] <- "" + } + if (!("var_min" %in% names(obs[[i]]))) { + obs[[i]][["var_min"]] <- "" + } + if (!("var_max" %in% names(obs[[i]]))) { + obs[[i]][["var_max"]] <- "" + } + } + } + if (length(c(obs_to_fetch, exps_to_fetch) > 1) && + (length(obs_to_fetch) < length(obs))) { + cat("! Warning: 'path' was provided for some observational datasets in 'obs'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + if (is.null(sdates)) { + stop("Error: parameter 'sdates' must be provided.") + } + if (!is.character(sdates) || !all(nchar(sdates) == 8) || + any(is.na(strtoi(sdates)))) { + stop("Error: parameter 'sdates' is incorrect. All starting dates should be a character string in the format 'YYYYMMDD'.") + } + if (!is.null(nmember) && !is.null(exp)) { + if (!is.numeric(nmember)) { + stop("Error: parameter 'nmember' is incorrect. It should be numeric.") + } + if (length(nmember) == 1) { + cat(paste("! Warning: 'nmember' should specify the number of members of each experimental dataset. 
Forcing to", + nmember, "for all experiments.\n")) + nmember <- rep(nmember, length(exp)) + } + if (length(nmember) != length(exp)) { + stop("Error: 'nmember' must contain as many values as 'exp'.") + } + else if (any(is.na(nmember))) { + nmember[which(is.na(nmember))] <- max(nmember, + na.rm = TRUE) + } + } + if (!is.null(nmemberobs) && !is.null(obs)) { + if (!is.numeric(nmemberobs)) { + stop("Error: parameter 'nmemberobs' is incorrect. It should be numeric.") + } + if (length(nmemberobs) == 1) { + cat(paste("! Warning: 'nmemberobs' should specify the number of members of each observational dataset. Forcing to", + nmemberobs, "for all observations.\n")) + nmemberobs <- rep(nmemberobs, length(obs)) + } + if (length(nmemberobs) != length(obs)) { + stop("Error: 'nmemberobs' must contain as many values as 'obs'.") + } + else if (any(is.na(nmemberobs))) { + nmemberobs[which(is.na(nmemberobs))] <- max(nmemberobs, + na.rm = TRUE) + } + } + if (!is.null(nleadtime) && !is.numeric(nleadtime)) { + stop("Error: parameter 'nleadtime' is wrong. It should be numeric.") + } + if (is.null(leadtimemin) || !is.numeric(leadtimemin)) { + stop("Error: parameter 'leadtimemin' is wrong. It should be numeric.") + } + if (!is.null(leadtimemax) && !is.numeric(leadtimemax)) { + stop("Error: parameter 'leadtimemax' is wrong. It should be numeric.") + } + if (!is.character(storefreq) || !(storefreq %in% c("monthly", + "daily"))) { + stop("Error: parameter 'storefreq' is wrong, can take value 'daily' or 'monthly'.") + } + if (is.null(sampleperiod) || !is.numeric(sampleperiod)) { + stop("Error: parameter 'sampleperiod' is wrong. It should be numeric.") + } + if (is.null(lonmin) || !is.numeric(lonmin)) { + stop("Error: parameter 'lonmin' is wrong. It should be numeric.") + } + if (lonmin < -360 || lonmin > 360) { + stop("Error: parameter 'lonmin' must be in the range [-360, 360]") + } + if (lonmin < 0) { + lonmin <- lonmin + 360 + } + if (is.null(lonmax) || !is.numeric(lonmax)) { + stop("Error: parameter 'lonmax' is wrong. It should be numeric.") + } + if (lonmax < -360 || lonmax > 360) { + stop("Error: parameter 'lonmax' must be in the range [-360, 360]") + } + if (lonmax < 0) { + lonmax <- lonmax + 360 + } + if (is.null(latmin) || !is.numeric(latmin)) { + stop("Error: parameter 'latmin' is wrong. It should be numeric.") + } + if (latmin > 90 || latmin < -90) { + stop("Error: 'latmin' must be in the interval [-90, 90].") + } + if (is.null(latmax) || !is.numeric(latmax)) { + stop("Error: parameter 'latmax' is wrong. It should be numeric.") + } + if (latmax > 90 || latmax < -90) { + stop("Error: 'latmax' must be in the interval [-90, 90].") + } + if (is.null(output) || !(output %in% c("lonlat", "lon", + "lat", "areave"))) { + stop("Error: 'output' can only take values 'lonlat', 'lon', 'lat' or 'areave'.") + } + if (is.null(method) || !(method %in% c("bilinear", "bicubic", + "conservative", "distance-weighted"))) { + stop("Error: parameter 'method' is wrong, can take value 'bilinear', 'bicubic', 'conservative' or 'distance-weighted'.") + } + remap <- switch(method, bilinear = "remapbil", bicubic = "remapbic", + conservative = "remapcon", `distance-weighted` = "remapdis") + if (!is.null(grid)) { + if (is.character(grid)) { + supported_grids <- list("r[0-9]{1,}x[0-9]{1,}", + "t[0-9]{1,}grid") + grid_matches <- unlist(lapply(lapply(supported_grids, + regexpr, grid), .IsFullMatch, grid)) + if (sum(grid_matches) < 1) { + stop("The specified grid in the parameter 'grid' is incorrect. 
Must be one of rx or tgrid.") + } + } + else { + stop("Error: parameter 'grid' should be a character string, if specified.") + } + } + if (!is.list(maskmod)) { + stop("Error: parameter 'maskmod' must be a list.") + } + if (length(maskmod) < length(exp)) { + stop("Error: 'maskmod' must contain a numeric mask or NULL for each experiment in 'exp'.") + } + for (i in 1:length(maskmod)) { + if (is.list(maskmod[[i]])) { + if ((length(maskmod[[i]]) > 2) || !all(names(maskmod[[i]]) %in% + c("path", "nc_var_name"))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + else if (!(is.numeric(maskmod[[i]]) || is.null(maskmod[[i]]))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + if (!is.list(maskobs)) { + stop("Error: parameter 'maskobs' must be a list.") + } + if (length(maskobs) < length(obs)) { + stop("Error: 'maskobs' must contain a numeric mask or NULL for each obseriment in 'obs'.") + } + for (i in 1:length(maskobs)) { + if (is.list(maskobs[[i]])) { + if ((length(maskobs[[i]]) > 2) || !all(names(maskobs[[i]]) %in% + c("path", "nc_var_name"))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + else if (!(is.numeric(maskobs[[i]]) || is.null(maskobs[[i]]))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + if ((output != "areave" || !is.null(grid)) && length(exp) > + 0) { + if (!all(unlist(lapply(maskobs, is.null)))) { + cat("! Warning: 'maskobs' will be ignored. 'maskmod[[1]]' will be applied to observations instead.\n") + } + maskobs <- lapply(maskobs, function(x) x <- maskmod[[1]]) + } + if (is.null(configfile)) { + configfile <- system.file("config", "BSC.conf", package = "s2dverification") + } + else if (!is.character(configfile) || !(nchar(configfile) > + 0)) { + stop("Error: parameter 'configfile' must be a character string with the path to an s2dverification configuration file, if specified.") + } + if (!is.null(varmin) && !is.numeric(varmin)) { + stop("Error: parameter 'varmin' must be numeric, if specified.") + } + if (!is.null(varmax) && !is.numeric(varmax)) { + stop("Error: parameter 'varmax' must be numeric, if specified.") + } + if (!is.logical(silent)) { + stop("Error: parameter 'silent' must be TRUE or FALSE.") + } + if (!is.null(nprocs) && (!is.numeric(nprocs) || nprocs < + 1)) { + stop("Error: parameter 'nprocs' must be a positive integer, if specified.") + } + if (!is.null(dimnames) && (!is.list(dimnames))) { + stop("Error: parameter 'dimnames' must be a list, if specified.") + } + if (!all(names(dimnames) %in% c("member", "lat", "lon"))) { + stop("Error: parameter 'dimnames' is wrong. There are unrecognized component names. 
See 'dimnames' in ?Load for details.") + } + if (!is.numeric(remapcells) || remapcells < 0) { + stop("Error: 'remapcells' must be an integer >= 0.") + } + if (length(exps_to_fetch) > 0 || length(obs_to_fetch) > + 0) { + cat("* Some 'path's not explicitly provided in 'exp' and 'obs', so will now proceed to open the configuration file.\n") + data_info <- ConfigFileOpen(configfile, silent, TRUE) + matches <- ConfigApplyMatchingEntries(data_info, + var, sapply(exp[exps_to_fetch], "[[", "name"), + sapply(obs[obs_to_fetch], "[[", "name"), show_entries = FALSE, + show_result = FALSE) + replace_values <- data_info$definitions + if (!is.null(exp) && length(exps_to_fetch) > 0) { + counter <- 1 + exp[exps_to_fetch] <- lapply(matches$exp_info, + function(x) { + x[names(exp[[exps_to_fetch[counter]]])] <- exp[[exps_to_fetch[counter]]] + x[["path"]] <- paste0(x[["main_path"]], x[["file_path"]]) + counter <<- counter + 1 + x + }) + } + if (!is.null(obs) && length(obs_to_fetch) > 0) { + counter <- 1 + obs[obs_to_fetch] <- lapply(matches$obs_info, + function(x) { + x[names(obs[[obs_to_fetch[counter]]])] <- obs[[obs_to_fetch[counter]]] + x[["path"]] <- paste0(x[["main_path"]], x[["file_path"]]) + counter <<- counter + 1 + x + }) + } + if (!silent) { + cat("* All pairs (var, exp) and (var, obs) have matching entries.\n") + } + } + else { + replace_values <- list(DEFAULT_NC_VAR_NAME = "$VAR_NAME$", + DEFAULT_VAR_MIN = "", DEFAULT_VAR_MAX = "", DEFAULT_SUFFIX = "", + DEFAULT_DIM_NAME_LONGITUDES = "longitude", DEFAULT_DIM_NAME_LATITUDES = "latitude", + DEFAULT_DIM_NAME_MEMBERS = "ensemble") + } + dimnames <- list(lon = ifelse(is.null(dimnames[["lon"]]), + replace_values[["DEFAULT_DIM_NAME_LONGITUDES"]], + dimnames[["lon"]]), lat = ifelse(is.null(dimnames[["lat"]]), + replace_values[["DEFAULT_DIM_NAME_LATITUDES"]], dimnames[["lat"]]), + member = ifelse(is.null(dimnames[["member"]]), replace_values[["DEFAULT_DIM_NAME_MEMBERS"]], + dimnames[["member"]])) + if (!is.null(exp)) { + exp <- lapply(exp, function(x) { + if (!("dimnames" %in% names(x))) { + x[["dimnames"]] <- dimnames + x + } + else { + dimnames2 <- dimnames + dimnames2[names(x[["dimnames"]])] <- x[["dimnames"]] + x[["dimnames"]] <- dimnames2 + x + } + }) + } + if (!is.null(obs)) { + obs <- lapply(obs, function(x) { + if (!("dimnames" %in% names(x))) { + x[["dimnames"]] <- dimnames + x + } + else { + dimnames2 <- dimnames + dimnames2[names(x[["dimnames"]])] <- x[["dimnames"]] + x[["dimnames"]] <- dimnames2 + x + } + }) + } + single_dataset <- (length(obs) + length(exp) == 1) + replace_values[["VAR_NAME"]] <- var + replace_values[["STORE_FREQ"]] <- storefreq + latitudes <- longitudes <- NULL + leadtimes <- NULL + var_exp <- var_obs <- NULL + units <- var_long_name <- NULL + is_2d_var <- FALSE + nmod <- length(exp) + nobs <- length(obs) + nsdates <- length(sdates) + if (!silent) { + cat("* Fetching first experimental files to work out 'var_exp' size...\n") + } + dataset_type <- "exp" + dim_exp <- NULL + filename <- file_found <- tmp <- nltime <- NULL + dims2define <- TRUE + is_file_per_member_exp <- rep(nmod, FALSE) + exp_work_pieces <- list() + jmod <- 1 + while (jmod <= nmod) { + tags_to_find <- c("MEMBER_NUMBER") + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values[-position_of_tags], TRUE) + } + else { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values, TRUE) + 
} + is_file_per_member_exp[jmod] <- grepl("$MEMBER_NUMBER$", + quasi_final_path, fixed = TRUE) + replace_values[["EXP_NAME"]] <- exp[[jmod]][["name"]] + replace_values[["NC_VAR_NAME"]] <- exp[[jmod]][["nc_var_name"]] + namevar <- .ConfigReplaceVariablesInString(exp[[jmod]][["nc_var_name"]], + replace_values) + replace_values[["SUFFIX"]] <- exp[[jmod]][["suffix"]] + if (is.null(varmin)) { + mod_var_min <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][["var_min"]], + replace_values)) + } + else { + mod_var_min <- varmin + } + if (is.null(varmax)) { + mod_var_max <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][["var_max"]], + replace_values)) + } + else { + mod_var_max <- varmax + } + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + replace_values[["YEAR"]] <- substr(sdates[jsdate], + 1, 4) + replace_values[["MONTH"]] <- substr(sdates[jsdate], + 5, 6) + replace_values[["DAY"]] <- substr(sdates[jsdate], + 7, 8) + if (dims2define) { + if (is_file_per_member_exp[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- "*" + } + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, grid = grid, + remap = remap, remapcells = remapcells, is_file_per_member = is_file_per_member_exp[jmod], + is_file_per_dataset = FALSE, lon_limits = c(lonmin, + lonmax), lat_limits = c(latmin, latmax), + dimnames = exp[[jmod]][["dimnames"]], single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, + silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmember)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading data from a server but 'nmember' not specified. Loading only one member.\n") + nmember <- rep(1, nmod) + } + else { + nmember <- rep(found_dims[["member"]], + nmod) + } + } + if (is.null(nleadtime)) { + nleadtime <- found_dims[["time"]] + } + if (is.null(leadtimemax)) { + leadtimemax <- nleadtime + } + else if (leadtimemax > nleadtime) { + stop("Error: 'leadtimemax' argument is greater than the number of loaded leadtimes. 
Put first the experiment with the greatest number of leadtimes or adjust properly the parameters 'nleadtime' and 'leadtimemax'.") + } + leadtimes <- seq(leadtimemin, leadtimemax, + sampleperiod) + latitudes <- found_dims[["lat"]] + longitudes <- found_dims[["lon"]] + if (output == "lon" || output == "lonlat") { + dim_exp[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_exp[["lat"]] <- length(latitudes) + } + dim_exp[["time"]] <- length(leadtimes) + dim_exp[["member"]] <- max(nmember) + dim_exp[["sdate"]] <- nsdates + dim_exp[["dataset"]] <- nmod + dims2define <- FALSE + } + } + if (is_file_per_member_exp[jmod]) { + jmember <- 1 + while (jmember <= nmember[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", + (nmember[jmod]%/%10) + 1, "i", sep = ""), + jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, indices = c(1, + jmember, jsdate, jmod), nmember = nmember[jmod], + leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][["dimnames"]], + var_limits = c(mod_var_min, mod_var_max), + remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + jmember <- jmember + 1 + } + } + else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, indices = c(1, + 1, jsdate, jmod), nmember = nmember[jmod], + leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][["dimnames"]], + var_limits = c(mod_var_min, mod_var_max), + remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + } + jsdate <- jsdate + 1 + } + jmod <- jmod + 1 + } + if (dims2define && length(exp) > 0) { + cat("! Warning: no data found in file system for any experimental dataset.\n") + } + dims <- dim_exp[na.omit(match(c("dataset", "member", + "sdate", "time", "lat", "lon"), names(dim_exp)))] + if (is.null(dims[["member"]]) || any(is.na(unlist(dims))) || + any(unlist(dims) == 0)) { + dims <- 0 + dim_exp <- NULL + } + if (!silent) { + message <- "* Success. Detected dimensions of experimental data: " + cat(paste0(message, paste(unlist(dims), collapse = ", "), + "\n")) + cat("* Fetching first observational files to work out 'var_obs' size...\n") + } + if (is.null(exp) || dims == 0) { + if (is.null(leadtimemax)) { + cat("! Warning: loading observations only and no 'leadtimemax' specified. 
Data will be loaded from each starting date to current time.\n") + diff <- Sys.time() - as.POSIXct(paste(substr(sdates[1], + 1, 4), "-", substr(sdates[1], 5, 6), "-", substr(sdates[1], + 7, 8), sep = "")) + if (storefreq == "monthly") { + leadtimemax <- as.integer(diff/30) + } + else { + leadtimemax <- as.integer(diff) + } + } + if (is.null(nleadtime)) { + nleadtime <- leadtimemax + } + leadtimes <- seq(leadtimemin, leadtimemax, sampleperiod) + } + dataset_type <- "obs" + dim_obs <- NULL + dims2define <- TRUE + lat_indices <- lon_indices <- NULL + obs_work_pieces <- list() + is_file_per_dataset_obs <- rep(FALSE, nobs) + is_file_per_member_obs <- rep(FALSE, nobs) + jobs <- 1 + while (jobs <= nobs) { + tags_to_find <- c("MONTH", "DAY", "YEAR", "MEMBER_NUMBER") + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values[-position_of_tags], TRUE) + } + else { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values, TRUE) + } + is_file_per_dataset_obs[jobs] <- !any(sapply(c("$MONTH$", + "$DAY$", "$YEAR$"), grepl, quasi_final_path, + fixed = TRUE)) + is_file_per_member_obs[jobs] <- grepl("$MEMBER_NUMBER$", + quasi_final_path, fixed = TRUE) + replace_values[["OBS_NAME"]] <- obs[[jobs]][["name"]] + replace_values[["NC_VAR_NAME"]] <- obs[[jobs]][["nc_var_name"]] + namevar <- .ConfigReplaceVariablesInString(obs[[jobs]][["nc_var_name"]], + replace_values) + replace_values[["SUFFIX"]] <- obs[[jobs]][["suffix"]] + if (is.null(varmin)) { + obs_var_min <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][["var_min"]], + replace_values)) + } + else { + obs_var_min <- varmin + } + if (is.null(varmax)) { + obs_var_max <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][["var_max"]], + replace_values)) + } + else { + obs_var_max <- varmax + } + if (is_file_per_dataset_obs[jobs]) { + if (dims2define) { + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, grid = grid, + remap = remap, remapcells = remapcells, is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), lat_limits = c(latmin, + latmax), dimnames = obs[[jobs]][["dimnames"]], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, + silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. 
Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } + else { + nmemberobs <- rep(found_dims[["member"]], + nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[["lon"]] + latitudes <- found_dims[["lat"]] + } + if (output == "lon" || output == "lonlat") { + dim_obs[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_obs[["lat"]] <- length(latitudes) + } + dim_obs[["time"]] <- length(leadtimes) + dim_obs[["member"]] <- max(nmemberobs) + dim_obs[["sdate"]] <- nsdates + dim_obs[["dataset"]] <- nobs + dims2define <- FALSE + } + } + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, indices = c(1, + 1, 1, jobs), nmember = nmemberobs[jobs], mask = maskobs[[jobs]], + leadtimes = leadtimes, is_file_per_dataset = is_file_per_dataset_obs[jobs], + startdates = sdates, dimnames = obs[[jobs]][["dimnames"]], + var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } + else { + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + sdate <- sdates[jsdate] + if (storefreq == "daily") { + day <- substr(sdate, 7, 8) + if (day == "") { + day <- "01" + } + day <- as.integer(day) + startdate <- as.POSIXct(paste(substr(sdate, + 1, 4), "-", substr(sdate, 5, 6), "-", day, + " 12:00:00", sep = "")) + (leadtimemin - + 1) * 86400 + year <- as.integer(substr(startdate, 1, 4)) + month <- as.integer(substr(startdate, 6, + 7)) + } + else { + month <- (as.integer(substr(sdate, 5, 6)) + + leadtimemin - 2)%%12 + 1 + year <- as.integer(substr(sdate, 1, 4)) + + (as.integer(substr(sdate, 5, 6)) + leadtimemin - + 2)%/%12 + } + jleadtime <- 1 + while (jleadtime <= length(leadtimes)) { + replace_values[["YEAR"]] <- paste(year, "", + sep = "") + replace_values[["MONTH"]] <- sprintf("%2.2i", + month) + if (storefreq == "daily") { + replace_values[["DAY"]] <- sprintf("%2.2i", + day) + days_in_month <- ifelse(LeapYear(year), + 29, 28) + days_in_month <- switch(paste(month, "", + sep = ""), `1` = 31, `3` = 31, `4` = 30, + `5` = 31, `6` = 30, `7` = 31, `8` = 31, + `9` = 30, `10` = 31, `11` = 30, `12` = 31, + days_in_month) + obs_file_indices <- seq(day, min(days_in_month, + (length(leadtimes) - jleadtime) * sampleperiod + + day), sampleperiod) + } + else { + obs_file_indices <- 1 + } + if (dims2define) { + if (is_file_per_member_obs[jobs]) { + replace_values[["MEMBER_NUMBER"]] <- "*" + } + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, + grid = grid, remap = remap, remapcells = remapcells, + is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), lat_limits = c(latmin, + latmax), dimnames = obs[[jobs]][["dimnames"]], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, + explore_dims = TRUE, silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. 
Forcing to 'areave'\n.", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } + else { + nmemberobs <- rep(found_dims[["member"]], + nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[["lon"]] + latitudes <- found_dims[["lat"]] + } + if (output == "lon" || output == "lonlat") { + dim_obs[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_obs[["lat"]] <- length(latitudes) + } + dim_obs[["time"]] <- length(leadtimes) + dim_obs[["member"]] <- max(nmemberobs) + dim_obs[["sdate"]] <- nsdates + dim_obs[["dataset"]] <- nobs + dims2define <- FALSE + } + } + if (is_file_per_member_obs[jobs]) { + jmember <- 1 + while (jmember <= nmemberobs[jobs]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", + (nmemberobs[jobs]%/%10) + 1, "i", sep = ""), + jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, + indices = c(jleadtime, jmember, jsdate, + jobs), nmember = nmemberobs[jobs], + leadtimes = obs_file_indices, mask = maskobs[[jobs]], + dimnames = obs[[jobs]][["dimnames"]], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + var_limits = c(obs_var_min, obs_var_max), + remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, + list(work_piece)) + jmember <- jmember + 1 + } + } + else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, indices = c(jleadtime, + 1, jsdate, jobs), nmember = nmemberobs[jobs], + leadtimes = obs_file_indices, mask = maskobs[[jobs]], + dimnames = obs[[jobs]][["dimnames"]], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + var_limits = c(obs_var_min, obs_var_max), + remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } + if (storefreq == "daily") { + startdate <- startdate + 86400 * sampleperiod * + length(obs_file_indices) + year <- as.integer(substr(startdate, 1, + 4)) + month <- as.integer(substr(startdate, 6, + 7)) + day <- as.integer(substr(startdate, 9, + 10)) + } + else { + month <- month + sampleperiod + year <- year + (month - 1)%/%12 + month <- (month - 1)%%12 + 1 + } + jleadtime <- jleadtime + length(obs_file_indices) + } + jsdate <- jsdate + 1 + } + } + jobs <- jobs + 1 + } + if (dims2define && length(obs) > 0) { + cat("! Warning: no data found in file system for any observational dataset.\n") + } + dims <- dim_obs[na.omit(match(c("dataset", "member", + "sdate", "time", "lat", "lon"), names(dim_obs)))] + if (is.null(dims[["member"]]) || any(is.na(unlist(dims))) || + any(unlist(dims) == 0)) { + dims <- 0 + dim_obs <- NULL + } + if (!silent) { + message <- "* Success. 
Detected dimensions of observational data: " + cat(paste0(message, paste(unlist(dims), collapse = ", "), + "\n")) + } + if (!(is.null(dim_obs) && is.null(dim_exp))) { + pointer_var_exp <- pointer_var_obs <- NULL + if (!is.null(dim_exp) && (length(unlist(dim_exp)) == + length(dim_exp)) && !any(is.na(unlist(dim_exp))) && + !any(unlist(dim_exp) == 0)) { + var_exp <- big.matrix(nrow = prod(unlist(dim_exp)), + ncol = 1) + pointer_var_exp <- describe(var_exp) + } + if (!is.null(dim_obs) && (length(unlist(dim_obs)) == + length(dim_obs)) && !any(is.na(unlist(dim_obs))) && + !any(unlist(dim_obs) == 0)) { + var_obs <- big.matrix(nrow = prod(unlist(dim_obs)), + ncol = 1) + pointer_var_obs <- describe(var_obs) + } + if (is.null(nprocs)) { + nprocs <- detectCores() + } + exp_work_piece_percent <- prod(dim_exp)/(prod(dim_obs) + + prod(dim_exp)) + obs_work_piece_percent <- prod(dim_obs)/(prod(dim_obs) + + prod(dim_exp)) + exp_work_pieces <- lapply(exp_work_pieces, function(x) c(x, + list(dataset_type = "exp", dims = dim_exp, out_pointer = pointer_var_exp))) + obs_work_pieces <- lapply(obs_work_pieces, function(x) c(x, + list(dataset_type = "obs", dims = dim_obs, out_pointer = pointer_var_obs))) + work_pieces <- c(exp_work_pieces, obs_work_pieces) + if (length(work_pieces)/nprocs >= 2 && !silent) { + if (length(work_pieces)/nprocs < 10) { + amount <- 100/ceiling(length(work_pieces)/nprocs) + reps <- ceiling(length(work_pieces)/nprocs) + } + else { + amount <- 10 + reps <- 10 + } + progress_steps <- rep(amount, reps) + if (length(exp_work_pieces) == 0) { + selected_exp_pieces <- c() + } + else if (length(exp_work_pieces) < floor(reps * + exp_work_piece_percent) + 1) { + selected_exp_pieces <- length(exp_work_pieces) + progress_steps <- c(sum(head(progress_steps, + floor(reps * exp_work_piece_percent))), tail(progress_steps, + ceiling(reps * obs_work_piece_percent))) + } + else { + selected_exp_pieces <- round(seq(1, length(exp_work_pieces), + length.out = floor(reps * exp_work_piece_percent) + + 1))[-1] + } + if (length(obs_work_pieces) == 0) { + selected_obs_pieces <- c() + } + else if (length(obs_work_pieces) < ceiling(reps * + obs_work_piece_percent) + 1) { + selected_obs_pieces <- length(obs_work_pieces) + progress_steps <- c(head(progress_steps, floor(reps * + exp_work_piece_percent)), sum(tail(progress_steps, + ceiling(reps * obs_work_piece_percent)))) + } + else { + selected_obs_pieces <- round(seq(1, length(obs_work_pieces), + length.out = ceiling(reps * obs_work_piece_percent) + + 1))[-1] + } + selected_pieces <- c(selected_exp_pieces, selected_obs_pieces + + length(exp_work_pieces)) + progress_steps <- paste0(" + ", round(progress_steps, + 2), "%") + progress_message <- "* Progress: 0%" + } + else { + progress_message <- "" + selected_pieces <- NULL + } + piece_counter <- 1 + step_counter <- 1 + work_pieces <- lapply(work_pieces, function(x) { + wp <- c(x, list(is_2d_var = is_2d_var, grid = grid, + remap = remap, lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), output = output, + remapcells = remapcells, single_dataset = single_dataset)) + if (piece_counter %in% selected_pieces) { + wp <- c(wp, list(progress_amount = progress_steps[step_counter])) + step_counter <<- step_counter + 1 + } + piece_counter <<- piece_counter + 1 + wp + }) + if (!silent) { + cat(paste("* Will now proceed to read and process ", + length(work_pieces), " data files:\n", sep = "")) + if (length(work_pieces) < 30) { + lapply(work_pieces, function(x) cat(paste("* ", + x[["filename"]], "\n", sep = ""))) + } + 
else { + cat(paste("* The list of files is long. You can check it after Load() finishes in the output '$source_files'.\n")) + } + if (length(dim_obs) == 0) { + bytes_obs <- 0 + obs_dim_sizes <- "0" + } + else { + bytes_obs <- prod(c(dim_obs, 8)) + obs_dim_sizes <- paste(na.omit(as.vector(dim_obs[c("dataset", + "member", "sdate", "time", "lat", "lon")])), + collapse = " x ") + } + if (length(dim_exp) == 0) { + bytes_exp <- 0 + exp_dim_sizes <- "0" + } + else { + bytes_exp <- prod(c(dim_exp, 8)) + exp_dim_sizes <- paste(na.omit(as.vector(dim_exp[c("dataset", + "member", "sdate", "time", "lat", "lon")])), + collapse = " x ") + } + cat(paste("* Total size of requested data: ", + bytes_obs + bytes_exp, "bytes.\n")) + cat(paste("* - Experimental data: (", exp_dim_sizes, + ") x 8 bytes =", bytes_exp, "bytes.\n")) + cat(paste("* - Observational data: (", obs_dim_sizes, + ") x 8 bytes =", bytes_obs, "bytes.\n")) + cat(paste("* If size of requested data is close to or above the free shared RAM memory, R will crash.\n")) + } + if (nprocs == 1) { + found_files <- lapply(work_pieces, .LoadDataFile, + silent = silent) + } + else { + cluster <- makeCluster(nprocs, outfile = "") + if (!silent) { + cat(paste("* Loading... This may take several minutes...\n", + sep = "")) + cat(progress_message) + } + work_errors <- try({ + found_files <- clusterApplyLB(cluster, work_pieces, + .LoadDataFile, silent = silent) + }) + stopCluster(cluster) + } + if (!silent) { + if (progress_message != "") { + cat("\n") + } + if (any(unlist(lapply(found_files, is.null)))) { + if (sum(unlist(lapply(found_files, is.null))) < + 30) { + cat("! WARNING: The following files were not found in the file system. Filling with NA values instead.\n") + lapply(work_pieces[which(unlist(lapply(found_files, + is.null)))], function(x) cat(paste("* ", + x[["filename"]], "\n", sep = ""))) + } + else { + cat("! WARNING: Some files were not found in the file system. The list is long. You can check it in the output '$not_found_files'. Filling with NA values instead.\n") + } + } + } + source_files <- unlist(found_files[which(!unlist(lapply(found_files, + is.null)))]) + not_found_files <- unlist(lapply(work_pieces[which(unlist(lapply(found_files, + is.null)))], "[[", "filename")) + } + else { + error_message <- "Error: No found files for any dataset. 
Check carefully the file patterns and correct either the pattern or the provided parameters:\n" + if (!is.null(exp)) { + lapply(exp, function(x) error_message <<- paste0(error_message, + paste0(x[["path"]], "\n"))) + } + if (!is.null(obs)) { + lapply(obs, function(x) error_message <<- paste0(error_message, + paste0(x[["path"]], "\n"))) + } + stop(error_message) + } + }) + if (class(errors) == "try-error") { + invisible(list(load_parameters = load_parameters)) + } + else { + variable <- list() + variable[["varName"]] <- var + variable[["level"]] <- NULL + attr(variable, "is_standard") <- FALSE + attr(variable, "units") <- units + attr(variable, "longname") <- var_long_name + attr(variable, "daily_agg_cellfun") <- "none" + attr(variable, "monthly_agg_cellfun") <- "none" + attr(variable, "verification_time") <- "none" + if (is.null(var_exp)) { + mod_data <- NULL + } + else { + dim_reorder <- length(dim_exp):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_exp + dim_exp <- dim_exp[dim_reorder] + mod_data <- aperm(array(bigmemory::as.matrix(var_exp), + dim = old_dims), dim_reorder) + attr(mod_data, "dimensions") <- names(dim_exp) + } + if (is.null(var_obs)) { + obs_data <- NULL + } + else { + dim_reorder <- length(dim_obs):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_obs + dim_obs <- dim_obs[dim_reorder] + obs_data <- aperm(array(bigmemory::as.matrix(var_obs), + dim = old_dims), dim_reorder) + attr(obs_data, "dimensions") <- names(dim_obs) + } + if (is.null(latitudes)) { + lat <- 0 + attr(lat, "cdo_grid_name") <- "none" + } + else { + lat <- latitudes + attr(lat, "cdo_grid_name") <- if (is.null(grid)) + "none" + else grid + } + attr(lat, "projection") <- "none" + if (is.null(longitudes)) { + lon <- 0 + attr(lon, "cdo_grid_name") <- "none" + } + else { + lon <- longitudes + attr(lon, "cdo_grid_name") <- if (is.null(grid)) + "none" + else grid + } + attr(lon, "projection") <- "none" + dates <- list() + dates[["start"]] <- NULL + dates[["end"]] <- NULL + models <- NULL + if (length(exp) > 0 && !is.null(dim_exp)) { + models <- list() + for (jmod in 1:length(exp)) { + models[[exp[[jmod]][["name"]]]] <- list(members = paste0("Member_", + 1:nmember[jmod]), source = if ((nchar(exp[[jmod]][["path"]]) - + nchar(gsub("/", "", exp[[jmod]][["path"]])) > + 2) && (length(sdates) > 1 && !is_file_per_member_exp[jmod])) { + parts <- strsplit(exp[[jmod]][["path"]], "/")[[1]] + paste(parts[-length(parts)], sep = "", collapse = "/") + } else { + exp[[jmod]][["path"]] + }) + } + } + observations <- NULL + if (length(obs) > 0 && !is.null(dim_obs)) { + observations <- list() + for (jobs in 1:length(obs)) { + observations[[obs[[jobs]][["name"]]]] <- list(members = paste0("Member_", + 1:nmemberobs[jobs]), source = if ((nchar(obs[[jobs]][["path"]]) - + nchar(gsub("/", "", obs[[jobs]][["path"]])) > + 2) && !is_file_per_dataset_obs[jobs]) { + parts <- strsplit(obs[[jobs]][["path"]], "/")[[1]] + paste(parts[-length(parts)], sep = "", collapse = "/") + } else { + obs[[jobs]][["path"]] + }) + } + } + invisible(list(mod = mod_data, obs = obs_data, lon = lon, + lat = lat, Variable = variable, Datasets = list(exp = models, + obs = observations), Dates = dates, InitializationDates = lapply(sdates, + function(x) { + sink("/dev/null") + date <- print(as.POSIXct(as.Date(x, format = "%Y%m%d"))) + sink() + date + }), when = Sys.time(), source_files = source_files, + not_found_files = not_found_files, load_parameters = load_parameters)) + } +} + +mergePDF <- function(..., file, gsversion = NULL, in.file = NULL) 
{
+ if (is.null(in.file)) {
+ in.file <- substitute(...())
+ }
+ infiles <- paste(unlist(lapply(in.file, function(y) as.character(y))),
+ collapse = " ")
+ if (is.null(gsversion)) {
+ gsversion <- names(which(Sys.which(c("gs", "gswin32c", "gswin64c")) != ""))
+ if (length(gsversion) == 0)
+ stop("Please install Ghostscript and ensure it is in your PATH")
+ if (length(gsversion) > 1)
+ stop("More than one Ghostscript executable was found: ",
+ paste(gsversion, collapse = " "),
+ ". Please specify which version should be used with the gsversion argument")
+ }
+ pre <- " -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile="
+ system(paste(paste(gsversion, pre, file, sep = ""), infiles, collapse = " "))
+}
+# Example (hypothetical file names): mergePDF("part1.pdf", "part2.pdf", file = "merged.pdf")
diff --git a/Rfunctions.R~ b/Rfunctions.R~ new file mode 100644 index 0000000000000000000000000000000000000000..0049023d3232870fb480334b7ba1f2fb77b320e6 --- /dev/null +++ b/Rfunctions.R~ @@ -0,0 +1,4077 @@
+# Things you might want to change
+
+# options(papersize="a4")
+# options(editor="notepad")
+# options(pager="internal")
+
+# set the default help type:
+# options(help_type="text")
+options(help_type="html")
+
+# your list with all the R packages you want to load in memory at the beginning of every R session:
+my.packages.ic3<-c("sp","s2dverification","ncdf","ncdf4","statmod","maps","mapdata","parallel",
+ "doMC","RColorBrewer","MASS","maptools","scales","abind","reshape","reshape2","data.table","GEOmap")
+
+# "psych","plotrix"
+
+# GEOmap: for the dataset coastmap needed by PlotEquiDist
+
+############################ Script loaded automatically at startup #####################
+
+.First<-function(){
+ # load all my packages in memory at the beginning of the R session:
+ options(defaultPackages=c(getOption("defaultPackages"),my.packages.ic3))
+ cat("\nAll optional libraries loaded\n")
+ print(paste("Current R session loaded from: ",commandArgs())) # print the name of the .RData file just opened
+
+ print(paste("Working dir is: ",getwd()))
+ print(system("free -m"))
+ print(gc())
+}
+
+
+################################### My R Aliases ############################################
+# aliases for commonly used functions:
+
+l <- function(x) length(x) # <- to quickly get the length of a vector
+w <- function() windows() # <- to open a new window (only works in Windows; in Linux the command is x11())
+qu <- function() quit("no") # <- to exit quickly from R typing only qu() instead of q("no")
+clear <- function() rm(list=ls(envir=.GlobalEnv), envir=.GlobalEnv) # to remove all objects from the global environment (a plain ls()/rm() inside a function would only act on its local environment)
+na <- function(x) length(which(is.na(x))) # find if there are NAs in the object and how many they are
+nna <- function(x) length(which(!is.na(x))) # find if there are NOT NAs in the object and how many they are
+de <- function() dev.off() # to close a graphic window quickly
+pl <- function() plot.new() # to open a void plot quickly
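+
+# Illustrative sketch (not run), using the aliases defined above:
+# x <- c(1, NA, 3, NA)
+# l(x)   # 4, the length of the vector
+# na(x)  # 2 missing values
+# nna(x) # 2 non-missing values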
+################################### my Color Palettes #############################################
+
+library("RColorBrewer")
+
+my.palette1<-c("lightblue","white",brewer.pal(9, "YlOrRd")) #rev(brewer.pal(11, "RdYlBu")) # RdYlBu only has 11 colors!
+my.palette2<-c("lightblue","white",brewer.pal(9, "YlOrRd")) #rev(brewer.pal(11, "RdYlBu"))
+my.palette3<-brewer.pal(9, "Greens") # only 9 colors
+my.palette4<-rev(brewer.pal(9, "BuPu"))
+my.palette5<-brewer.pal(9, "Blues")
+my.palette6<-brewer.pal(9, "Blues")
+my.palette<-list(my.palette1,my.palette2,my.palette3,my.palette4,my.palette5,my.palette6)
+
+# color scales for the trend maps:
+my.palette1.trend<-brewer.pal(9,"YlOrBr")
+my.palette2.trend<-brewer.pal(9,"YlOrBr")
+my.palette3.trend<-brewer.pal(9,"Greens") # Winkler's Index
+my.palette4.trend<-brewer.pal(9,"YlOrBr") # rev(brewer.pal(11,"RdBu"))
+my.palette5.trend<-rev(brewer.pal(11,"RdBu"))
+my.palette6.trend<-rev(brewer.pal(11,"RdBu"))
+my.palette.trend<-list(my.palette1.trend,my.palette2.trend,my.palette3.trend,my.palette4.trend,my.palette5.trend,my.palette6.trend)
+
+# other color palettes:
+#my.palette7 <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors
+
+
+############################## My (short) Functions #######################################
+
+# save in this function all the commands needed to download all the packages the first time you install R:
+download.my.packages <- function(list.packages) for(p in 1:length(list.packages)) install.packages(list.packages[p], repos="http://cran.univ-lyon1.fr")
+
+# convert degrees to radians:
+deg2rad<-function(deg) return(deg*pi/180)
+
+################################### My Functions #######################################
+
+# function to show memory usage:
+.ls.objects <- function (pos = 1, pattern, order.by, decreasing=FALSE, head=FALSE, n=5) {
+ napply <- function(names, fn) sapply(names, function(x)
+ fn(get(x, pos = pos)))
+ names <- ls(pos = pos, pattern = pattern)
+ obj.class <- napply(names, function(x) as.character(class(x))[1])
+ obj.mode <- napply(names, mode)
+ obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
+ obj.size <- napply(names, object.size)
+ obj.dim <- t(napply(names, function(x)
+ as.numeric(dim(x))[1:2]))
+ vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
+ obj.dim[vec, 1] <- napply(names, length)[vec]
+ out <- data.frame(obj.type, obj.size, obj.dim)
+ names(out) <- c("Type", "Size", "Rows", "Columns")
+ if (!missing(order.by))
+ out <- out[order(out[[order.by]], decreasing=decreasing), ]
+ if (head)
+ out <- head(out, n)
+ return(out)
+}
+
+# shorthand to list the n biggest objects in memory:
+lsos <- function(..., n=10) {
+ .ls.objects(..., order.by="Size", decreasing=TRUE, head=TRUE, n=n)
+}
+
+# function returning a sorted vector with the memory consumption of each object in the global environment:
+object.sizes <- function()
+{
+ return(rev(sort(sapply(ls(envir=.GlobalEnv), function (object.name)
+ object.size(get(object.name))))))
+}
+
+# function to summarize memory usage on Linux:
+mem<-function(){
+ print(system("free -m"))
+ #print(gc())
+ print(lsos())
+}
+
+#################################################################################################
+# Calendar functions #
+#################################################################################################
+
+# vectors with the month names:
+my.month <- c("January","February","March","April","May","June","July","August","September","October","November","December")
+my.month.short <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
+my.month.short2 <- c("Ja", "Fe", "Ma", "Ap", "Ma", "Ju", "Ju", "Au", "Se", "Oc", "No", "De")
+my.month.short3 <- my.month.very.short <- c("J","F","M","A","M","J","J","A","S","O","N","D")
c("Winter", "Spring", "Summer", "Autumn") +my.period <- period.name <- c(my.month, my.season, "Yearly") + +endmonth<-function(day,month,year){ # day indica un giorno del mese di cui si vuole controllare se e' l'ultimo giorno del mese o meno: + last=FALSE + if(month==1 & day==31) last=TRUE; if(month==3 & day==31) last=TRUE; if(month==4 & day==30)last=TRUE; if(month==5 & day==31)last=TRUE + if(month==6 & day==30) last=TRUE; if(month==7 & day==31) last=TRUE; if(month==8 & day==31)last=TRUE; if(month==9 & day==30)last=TRUE + if(month==10 & day==31) last=TRUE; if(month==11 & day==30) last=TRUE; if(month==12 & day==31)last=TRUE + if(year%%400==0 | (year%%4==0 & year%%100!=0)){ # in questo caso l'anno e' bisestile) + if(month==2 & day==29) last=TRUE } else { # Anno NON bisestile: + if(month==2 & day==28) last=TRUE } + return(last) +} + +# simile alla precedente, restituisce qual'e'l'ultimo giorno del mese introdotto (ovvero il numero di giorni di quel mese) +lastday<-function(month,year){ + if(month==1 | month==3 | month==5 | month==7 | month==8 | month==10 | month==12) last=31 + if(month==4 | month==6 | month==9 | month==11) last=30 + if(month==2){ + if(year%%400==0 | (year%%4==0 & year%%100!=0)) {last=29} else {last=28} + } + #if(year==1 & month==2) last=28.25 # se si mette come anno il numero 1, restituisce il numero medio di giorni di febbraio quando si considerano tanti anni (28.25). Utile per calcolare le frequenze dei WTs del mese di febbraio. + return(last) +} + +# returns TRUE if the input year is a leap year: +leap.year <- function(year) return(ifelse((year%%4==0 & year%%100!=0) | year%%400==0, TRUE, FALSE)) + +# return the number of days of the input year: +n.days.in.a.year <- function(year) return(ifelse((year%%4==0 & year%%100!=0) | year%%400==0, 366, 365)) + +# return the number of days of the input month (1=Jan, 12=Dec): +# actually, it is only a wrapper of lastday(), that was kept for consistency +n.days.in.a.month <- function(month,year){ + return(lastday(month, year)) +} + +# like the previous function, but in case the input month is greater than 12, it enters into the next year: +ndm <- function(month,year){ + if(month < 13){ + return(lastday(month, year)) + } else { + return(lastday(month-12, year+1)) + } +} + +# return the number of days of the input season (1=Winter, 2=Spring, 3=Summer, 4=Autumn): +n.days.in.a.season <- function(season,year){ + return(length(pos.season(year,season))) +} + +# return the number of days of the input period (1-12: Jan-Dec, 13: winter, 14. spring, 15:summer, 16:autumn, 17: ear) +n.days.in.a.period <- function(period,year){ + if(period <= 12) return(n.days.in.a.month(period,year)) + if(period > 12 && period < 17) return(n.days.in.a.season(period-12,year)) + if(period == 17) return(n.days.in.a.year(year)) +} + +# get the total number of days from year.start to year.end: +n.days.in.a.yearly.period <- function(year.start, year.end){ + days.tot <- 0 + for(y in year.start:year.end){ + days.tot <- days.tot + n.days.in.a.year(y) + } + return(days.tot) +} + +# get the total number of days from month.start to month.end (included). 
+# get the total number of days from month.start to month.end (both included);
+# if month.end is smaller than month.start, month.end is taken as a month of the following year:
+n.days.in.a.monthly.period <- function(month.start, month.end, year){
+ days.tot <- 0
+ if(month.start <= month.end){
+ for(m in month.start:month.end) days.tot <- days.tot + n.days.in.a.month(m, year)
+ } else {
+ for(m in month.start:12) days.tot <- days.tot + n.days.in.a.month(m, year)
+ for(m in 1:month.end) days.tot <- days.tot + n.days.in.a.month(m, year+1)
+ }
+ return(days.tot)
+}
+
+# returns a sequence of 365 or 366 numbers, giving for each day of the year the number (1-12) of the month it belongs to:
+seq.months.in.a.year<-function(year){
+ n.days.febr<-ifelse(n.days.in.a.year(year)==366,29,28)
+ return(c(rep(1,31),rep(2,n.days.febr),rep(3,31),rep(4,30),rep(5,31),rep(6,30),rep(7,31),rep(8,31),rep(9,30),rep(10,31),rep(11,30),rep(12,31)))
+}
+
+# returns a sequence of 365 or 366 numbers, giving for each day of the year its day number within its month:
+seq.days.in.a.year<-function(year){
+ n.days.febr<-ifelse(n.days.in.a.year(year)==366,29,28)
+ return(c(1:31, 1:n.days.febr, 1:31, 1:30, 1:31, 1:30, 1:31, 1:31, 1:30, 1:31, 1:30, 1:31))
+}
+
+# interval of days belonging only to the year y, but starting to count from the year year.start:
+seq.days.in.a.future.year <- function(year.start, y){
+ return(n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(y,y) + 1:n.days.in.a.year(y))
+}
+
+# number of days from year.start to year y, excluding year y:
+n.days.in.a.future.year <- function(year.start, y){
+ return(n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(y,y))
+}
+
+
+# vector with the numbers of the days of the input year belonging to the input month (1=January, 12=December):
+pos.month <- function(year,month){
+ if(month==1) return(1:31)
+ n.feb <- 28 + ifelse(leap.year(year)==TRUE,1,0)
+
+ if(month == 2) return(31 + 1:n.feb)
+ if(month == 3) return(31 + n.feb + 1:31)
+ if(month == 4) return(31 + n.feb + 31 + 1:30)
+ if(month == 5) return(31 + n.feb + 31 + 30 + 1:31)
+ if(month == 6) return(31 + n.feb + 31 + 30 + 31 + 1:30)
+ if(month == 7) return(31 + n.feb + 31 + 30 + 31 + 30 + 1:31)
+ if(month == 8) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 1:31)
+ if(month == 9) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 1:30)
+ if(month == 10) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 1:31)
+ if(month == 11) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 1:30)
+ if(month == 12) return(31 + n.feb + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 1:31)
+}
+
+# as pos.month, but it returns a vector with the days of the input year belonging to the months before the input month (for January it returns 0):
+pos.months.before <- function(year,month){
+ pos <- c()
+ if(month == 1) {
+ pos <- 0
+ } else {
+ for(m in 1:(month-1)){
+ pos <- c(pos, pos.month(year, m))
+ }
+ }
+
+ return(pos)
+}
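+
+# Illustrative (not run):
+# pos.month(2000,2)  # 32:60, the positions of the 29 days of February in leap year 2000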
+# as pos.month, but it also adds the days of the two adjacent months:
+pos.month.extended <- function(year,month){
+ prev <- if(month == 1) 12 else month - 1
+ nxt <- if(month == 12) 1 else month + 1
+ return(c(pos.month(year,prev), pos.month(year,month), pos.month(year,nxt)))
+}
+
+# as pos.month.extended, but it only adds the 15 closest days of each of the two adjacent months:
+pos.month.extended15 <- function(year,month){
+ prev <- if(month == 1) 12 else month - 1
+ nxt <- if(month == 12) 1 else month + 1
+ pos.prev <- pos.month(year,prev)
+ return(c(pos.prev[(l(pos.prev)-14):l(pos.prev)], pos.month(year,month), pos.month(year,nxt)[1:15]))
+}
+
+# vector with the numbers of the days of the input year belonging to the input season (1=Winter, 4=Autumn); winter is Jan-Feb plus Dec of the same year:
+pos.season <- function(year,season){
+ if(season==1) return(c(pos.month(year,1),pos.month(year,2),pos.month(year,12))) # winter
+ if(season==2) return(c(pos.month(year,3),pos.month(year,4),pos.month(year,5))) # spring
+ if(season==3) return(c(pos.month(year,6),pos.month(year,7),pos.month(year,8))) # summer
+ if(season==4) return(c(pos.month(year,9),pos.month(year,10),pos.month(year,11))) # autumn
+}
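+
+# Illustrative (not run):
+# pos.season(2001,1)          # day-of-year positions of Jan, Feb and Dec 2001
+# length(pos.season(2001,1))  # 90 = 31 + 28 + 31 (2001 is not a leap year)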
+# same as pos.month, but for period > 12 it returns the seasonal positions instead (13: winter, 14: spring, 15: summer, 16: autumn), or the yearly interval for period = 17:
+pos.period <- function(year,period){
+ if(period <= 12) return(pos.month(year, period))
+ if(period > 12 && period < 17) return(pos.season(year, period-12))
+ if(period == 17) return(1:n.days.in.a.year(year))
+}
+
+
+
+# sequence of weekly startdates for the chosen year and start day/month:
+weekly.seq <- function(year,month,day){
+ yr1 <- year # starting year of the weekly sequence
+ #yr2 <- year # in the future you can create a sequence for more than one year
+ mes <- month # starting month (usually January)
+ #day<-2 # starting day
+
+ if(mes<10) {mes0 <- paste0(0,mes)} else {mes0 <- mes}
+ if(day<10) {day0 <- paste0(0,day)} else {day0 <- day}
+ sdates <- paste0(yr1,mes0,day0)
+ nday <- day
+ ndaysFebruary <- lastday(2,yr1)
+ ndays4month <- c(31,ndaysFebruary,31,30,31,30,31,31,30,31,30,31)
+
+ while (nday < 365-7) { # beware of leap years!
+ day <- day+7
+ nday <- nday+7
+ if(day > ndays4month[mes]){
+ day <- day-ndays4month[mes]
+ mes=mes+1
+ }
+ if(mes < 10){ mes0 <- paste0(0,mes)} else {mes0 <- mes}
+ if(day < 10){ day0 <- paste0(0,day)} else {day0 <- day}
+ sdates <- c(sdates,paste0(yr1,mes0,day0))
+ }
+ return(sdates)
+}
+
+# return the positions inside weekly.seq of all the startdates whose months belong to the chosen period:
+months.period <- function(year,mes,day,period){
+ sdates.seq <- weekly.seq(year,mes,day)
+ months.period <- list()
+
+ for(p in 1:12) months.period[[p]] <- which(as.numeric(substr(sdates.seq,5,6)) == p)
+
+ months.period[[13]] <- c(months.period[[1]],months.period[[2]],months.period[[12]])
+ months.period[[14]] <- c(months.period[[3]],months.period[[4]],months.period[[5]])
+ months.period[[15]] <- c(months.period[[6]],months.period[[7]],months.period[[8]])
+ months.period[[16]] <- c(months.period[[9]],months.period[[10]],months.period[[11]])
+
+ return(months.period[[period]])
+}
+
+#################################################################################################
+# Graphic functions #
+#################################################################################################
+
+# Like PlotEquiMap, but:
+# - with the option to specify with 'contours.col' the colours of the contour lines,
+# - with the option to specify with 'cex.axis' the size of the lat/lon tick numbers,
+# - with the option to specify with 'xlabel.dist' the distance of the x labels from the x axis,
+# - with the option to specify with 'contours.lty' a different line type for negative contour values!
+# - with the option 'contours.labels' not to draw the contour labels +# - with the option 'continents.col', the colors of the line of the continents, if filled.continents=FALSE (by default it is gray) +# +# PlotEquiMap(array(0,c(160,160)),1:160,-80:79) +# + +PlotEquiMap2<-function (var, lon, lat, toptitle = "", sizetit = 1, units = "", + brks = NULL, cols = NULL, square = TRUE, filled.continents = TRUE, + contours = NULL, brks2 = NULL, dots = NULL, axelab = TRUE, + labW = FALSE, intylat = 20, intxlon = 20, drawleg = TRUE, + subsampleg = 1, numbfig = 1, colNA = "white", contours.col = par("fg"), col_border = gray(0.5), + contours.lty = 1, contours.labels=TRUE, cex.lab = NULL, xlabel.dist = 1, continents.col = gray(0.5)) +{ + data(coastmap, envir = environment()) + dims <- dim(var) + if (length(dims) > 2) { + stop("Only 2 dimensions expected for var : (lon,lat) ") + } + if (dims[1] != length(lon) | dims[2] != length(lat)) { + if (dims[1] == length(lat) & dims[2] == length(lon)) { + var <- t(var) + dims <- dim(var) + } + else { + stop("Inconsistent var dimensions / longitudes + latitudes") + } + } + latb <- sort(lat, index.return = TRUE) + dlon <- lon[2:dims[1]] - lon[1:(dims[1] - 1)] + wher <- which(dlon > (mean(dlon) + 1)) + if (length(wher) > 0) { + lon[(wher + 1):dims[1]] <- lon[(wher + 1):dims[1]] - 360 + } + lonb <- sort(lon, index.return = TRUE) + latmin <- floor(min(lat)/10) * 10 + latmax <- ceiling(max(lat)/10) * 10 + lonmin <- floor(min(lon)/10) * 10 + lonmax <- ceiling(max(lon)/10) * 10 + colorbar <- colorRampPalette(c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen", "white", "white", "yellow", "orange", "red", "saddlebrown")) + if (is.null(brks) == TRUE) { + ll <- signif(min(var, na.rm = TRUE), 4) + ul <- signif(max(var, na.rm = TRUE), 4) + if (is.null(cols) == TRUE) { + cols <- colorbar(10) + } + nlev <- length(cols) + brks <- signif(seq(ll, ul, (ul - ll)/nlev), 4) + } + else { + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- colorbar(nlev) + } + else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + } + if (is.null(brks2) == TRUE) { + if (is.null(contours)) { + if (square == FALSE) { + brks2 <- brks + contours <- var + } + } + else { + ll <- signif(min(contours, na.rm = TRUE), 2) + ul <- signif(max(contours, na.rm = TRUE), 2) + brks2 <- signif(seq(ll, ul, (ul - ll)/(length(brks) - + 1)), 2) + } + } + if (axelab == TRUE) { + ypos <- seq(latmin, latmax, intylat) + xpos <- seq(lonmin, lonmax, intxlon) + letters <- array("", length(ypos)) + letters[ypos < 0] <- "S" + letters[ypos > 0] <- "N" + ylabs <- paste(as.character(abs(ypos)), letters, sep = "") + letters <- array("", length(xpos)) + if (labW) { + nlon <- length(xpos) + xpos2 <- xpos + xpos2[xpos2 > 180] <- 360 - xpos2[xpos2 > 180] + } + letters[xpos < 0] <- "W" + letters[xpos > 0] <- "E" + if (labW) { + letters[xpos == 0] <- " " + letters[xpos == 180] <- " " + letters[xpos > 180] <- "W" + xlabs <- paste(as.character(abs(xpos2)), letters, sep = "") + } + else { + xlabs <- paste(as.character(abs(xpos)), letters, sep = "") + } + xmargin <- 1.2 - (numbfig^0.2) * 0.05 + ymargin <- 3 - (numbfig^0.2) + spaceticklab <- 1.3 - (numbfig^0.2) * 0.8 + topmargin <- 0.4 + ymargin2 <- 1.5 - (numbfig^0.2) * 0.9 + } + else { + xmargin <- 0.2 + ymargin <- 0.2 + switch(as.character(square), `FALSE` = 1.8, + 0) + topmargin <- 0.2 + spaceticklab <- 1 + ymargin2 <- 0.2 + } + + if (toptitle != "") topmargin <- 2.5 - (numbfig^0.2) * 0.6 + if (min(lon) < 0) { + 
continents <- "world" + } else { + continents <- "world2" + } + + if (square) { + if (drawleg) {layout(matrix(1:2, ncol = 1, nrow = 2), heights = c(5,1))} + + par(mar = c(xmargin, ymargin, topmargin, ymargin2), cex = 1.4,mgp = c(3, spaceticklab, 0), las = 0) + + if (colNA != "white") { + blanks <- array(0, dim = c(length(lonb$x), length(latb$x))) + image(lonb$x, latb$x, blanks, col = c(colNA), breaks = c(-1,1), main = toptitle, cex.main = (1.5/numbfig^(0.2))*sizetit, axes = FALSE, xlab = "", ylab = "") + flagadd <- TRUE + } + else {flagadd <- FALSE} + + image(lonb$x, latb$x, var[lonb$ix, latb$ix], col = cols, breaks = brks, main = toptitle, axes = FALSE, xlab = "", ylab = "", cex.main = (1.5/numbfig^(0.2)) * sizetit, add = flagadd) + + if (axelab == TRUE) { + if(is.null(cex.lab)) {my.cex <- 1/(numbfig^0.3)} else {my.cex <- cex.lab} + axis(2, at = ypos, labels = ylabs, cex.axis = my.cex, tck = -0.01) + axis(1, at = xpos, labels = xlabs, cex.axis = my.cex, tck = -0.01, mgp=c(3,xlabel.dist,0)) + } + + if (is.null(contours) == FALSE) { + if(contours.lty == 1){ + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2, method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = 1, col = contours.col, drawlabels=contours.labels) + } else { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 < 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = contours.lty, col = contours.col, drawlabels=contours.labels) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 == 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 3, lty = 1, col = contours.col, drawlabels=contours.labels) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 > 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = 1, col = contours.col, drawlabels=contours.labels) + } + } + + map(continents, interior = FALSE, add = TRUE, lwd = 1, col=continents.col) + box() + } + else { + par(mar = c(xmargin + 5, ymargin + 1.5, topmargin, ymargin2), + cex.main = (1.6 * numbfig^(0.3)) * sizetit, cex.axis = 1.4, + cex.lab = 1.6, mgp = c(3, spaceticklab + 0.5, 0), + las = 0) + if (axelab == TRUE) { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + axis(2, at = ypos, labels = ylabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + axis(1, at = xpos, labels = xlabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2,col = contours.col) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, col=continents.col) + }, key.title = title(main = units, cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + else { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2, col = contours.col) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, col=continents.col) + }, key.title = title(main = units, 
cex.main = (1.2 * numbfig^(0.3)) * sizetit))
+ }
+ }
+ if (is.null(dots) == FALSE) {
+ for (ix in 1:length(lon)) {
+ for (jy in 1:length(lat)) {
+ if (is.na(var[ix, jy]) == FALSE) {
+ if (dots[ix, jy] == TRUE) {
+ text(x = lon[ix], y = lat[jy], ".", cex = 12/(sqrt(sqrt(length(var))) * numbfig^0.5))
+ }
+ }
+ }
+ }
+ }
+ if (square == TRUE & filled.continents == TRUE) {
+ if (min(lon) >= 0) {
+ ylat <- latmin:latmax
+ xlon <- lonmin:lonmax
+ proj <- setPROJ(1, LON0 = mean(xlon), LAT0 = mean(ylat),
+ LATS = ylat, LONS = xlon)
+ coastmap$STROKES$col[which(coastmap$STROKES$col == "blue")] <- "white"
+ par(new = TRUE)
+ plotGEOmap(coastmap, PROJ = proj, border = "black", add = TRUE)
+ box()
+ }
+ else {
+ map(continents, interior = FALSE, wrap = TRUE, lwd = 0.7, col = gray(0.5), fill = TRUE, add = TRUE, border = col_border)
+ }
+ }
+ if (square & drawleg) {
+ par(mar = c(1.5, ymargin + 1.5, 2.5, ymargin2), mgp = c(1.5, 0.3, 0), las = 1, cex = 1.2)
+ image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols, xlab = "", ylab = "", main = units, cex.main = 1.1)
+ box()
+ axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg), labels = brks[seq(1, length(brks), subsampleg)])
+ }
+}
+
+
+# Draw points over a PlotEquiMap:
+map.points<-function (x, country = "", label = NULL, minpop = 0,
+ maxpop = Inf, capitals = 0, cex = par("cex"), projection = FALSE,
+ parameters = NULL, orientation = NULL, pch = 1, ...)
+{
+
+ usr <- par("usr")
+ if (!missing(projection) && projection != FALSE) {
+ if (require(mapproj)) {
+ if (is.character(projection)) {
+ projx <- mapproject(x$long, x$lat, projection = projection,
+ parameters = parameters, orientation = orientation)
+ }
+ else {
+ if (nchar(.Last.projection()$projection) > 0) {
+ projx <- mapproject(x$long, x$lat)
+ }
+ else stop("No projection defined\n")
+ }
+ x$long <- projx$x
+ x$lat <- projx$y
+ }
+ else stop("mapproj package not available\n")
+ }
+ else {
+ if (usr[2] > (180 + 0.04 * (usr[2] - usr[1])))
+ x$long[x$long < 0] <- 360 + x$long[x$long < 0]
+ }
+ selection <- x$long >= usr[1] & x$long <= usr[2] & x$lat >=
+ usr[3] & x$lat <= usr[4] & (x$pop >= minpop & x$pop <=
+ maxpop) & ((capitals == 0) | (x$capital >= 1))
+ if (is.null(label))
+ label <- sum(selection) < 20
+ cxy <- par("cxy")
+ # split the selected points by capital status (assumed convention, as in maps::map.cities:
+ # 0 = not a capital, 1 = national capital, 2 = provincial, 3 = local); these subsets are
+ # used below, so they are defined here:
+ selection0 <- selection & x$capital == 0
+ selection1 <- selection & x$capital == 1
+ selection2 <- selection & x$capital == 2
+ selection3 <- selection & x$capital == 3
+ selection01 <- selection0 | selection1
+ if (sum(selection01) > 0)
+ points(x$long[selection01], x$lat[selection01], pch = pch,
+ cex = cex * 0.6, ...)
+ if (sum(selection0) > 0)
+ if (label)
+ text(x$long[selection0], x$lat[selection0] + cxy[2] *
+ cex * 0.7, paste(" ", x$name[selection0], sep = ""),
+ cex = cex * 0.7, ...)
+ if (sum(selection1) > 0) {
+ points(x$long[selection1], x$lat[selection1], pch = pch,
+ cex = cex, ...)
+ text(x$long[selection1], x$lat[selection1] + cxy[2] *
+ cex, paste(" ", x$name[selection1], sep = ""), cex = cex *
+ 1.2, ...)
+ }
+ if (sum(selection2) > 0) {
+ points(x$long[selection2], x$lat[selection2], pch = pch,
+ cex = cex, ...)
+ text(x$long[selection2], x$lat[selection2] + cxy[2] *
+ cex * 1.1, paste(" ", x$name[selection2], sep = ""),
+ cex = cex * 1.1, ...)
+ }
+ if (sum(selection3) > 0) {
+ points(x$long[selection3], x$lat[selection3], pch = pch,
+ cex = cex, ...)
+ text(x$long[selection3], x$lat[selection3] + cxy[2] *
+ cex * 0.9, paste(" ", x$name[selection3], sep = ""),
+ cex = cex * 0.9, ...)
+ }
+ invisible()
+}
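+
+# Illustrative sketch (not run), assuming the 'maps' package and its 'world.cities' dataset:
+# library(maps)
+# data(world.cities)
+# map("world", interior = FALSE)
+# map.points(world.cities, minpop = 5e6, pch = 19, cex = 0.8) # cities above 5 million inhabitants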
+
+
+# Taylor diagram (modified from the function taylor.diagram of the plotrix package, to use the same colors as Nube's taylor diagram);
+# you can also specify a text label for each point, and the point color can be drawn proportional to its bias
+# (still missing: a bias legend)
+# 'gamma' refers to the RMSE curves!
+my.taylor<-function (ref, model, add = FALSE, col = "red", pch = 19, pos.cor = TRUE,
+ xlab = "", ylab = "", main = "Taylor Diagram", show.gamma = TRUE,
+ ngamma = 3, gamma.col = "darkgreen", sd.arcs = 0, ref.sd = FALSE, sd.method = "sample",
+ grad.corr.lines = c(0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99), pcex = 1, cex.axis = 1,
+ normalize = FALSE, mar = c(5, 4, 6, 6), BIAS = FALSE, my.text = NULL, text.cex = pcex, RMSE.label = FALSE, ...)
+{
+ grad.corr.full <- c(0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99, 1)
+ R <- cor(ref, model, use = "na.or.complete")
+
+ if(BIAS==TRUE){
+ BIAS <- mean(model-ref,na.rm=TRUE)
+ my.equidist<-c(-75,-35,-15,-5,5,15,35,75)
+ my.colors<-c("magenta4","blue4","steelblue","skyblue2","orange","orangered","red","red4")
+ #my.colorscale<-rev(my.colors(8))
+ my.col<-my.colors[which.min(abs(my.equidist-BIAS))]
+ #my.labels<-c("0-30%","41-50","41-50","51-70%","61-70%","71-80%","81-90%","91-100%")
+ #my.cuts<-c(-100,-50,-20,-10,0,10,20,50,100)
+ }
+
+ if (is.list(ref))
+ ref <- unlist(ref)
+ if (is.list(model))
+ model <- unlist(model)
+ SD <- function(x, subn) {
+ meanx <- mean(x, na.rm = TRUE)
+ devx <- x - meanx
+ ssd <- sqrt(sum(devx * devx, na.rm = TRUE)/(length(x[!is.na(x)]) -
+ subn))
+ return(ssd)
+ }
+ subn <- sd.method != "sample"
+ sd.r <- SD(ref, subn)
+ sd.f <- SD(model, subn)
+ if (normalize) {
+ sd.f <- sd.f/sd.r
+ sd.r <- 1
+ }
+ maxsd <- 1.5 * max(sd.f, sd.r)
+ oldpar <- par("mar", "xpd", "xaxs", "yaxs")
+ if (!add) {
+ # plot for positive correlations only:
+ if (pos.cor) {
+ if (nchar(ylab) == 0)
+ ylab = "Standard deviation"
+ par(mar = mar)
+ plot(0, xlim = c(0, maxsd), ylim = c(0, maxsd), xaxs = "i",
+ yaxs = "i", axes = FALSE, main = main, xlab = xlab,
+ ylab = ylab, type = "n", cex = cex.axis, ...)
+ if (grad.corr.lines[1]) {
+ for (gcl in grad.corr.lines) lines(c(0, maxsd *
+ gcl), c(0, maxsd * sqrt(1 - gcl^2)), lty = 3,col="blue")
+ }
+ segments(c(0, 0), c(0, 0), c(0, maxsd), c(maxsd,
+ 0),col="blue")
+ axis.ticks <- pretty(c(0, maxsd),n=6)
+ axis.ticks <- axis.ticks[axis.ticks <= maxsd]
+ axis(1, at = axis.ticks, cex.axis = cex.axis)
+ axis(2, at = axis.ticks, cex.axis = cex.axis)
+ if (sd.arcs[1]) {
+ if (length(sd.arcs) == 1)
+ sd.arcs <- axis.ticks
+ for (sdarc in sd.arcs) {
+ xcurve <- cos(seq(0, pi/2, by = 0.03)) * sdarc
+ ycurve <- sin(seq(0, pi/2, by = 0.03)) * sdarc
+ lines(xcurve, ycurve, col = "black", lty = 3)
+ }
+ } # draw the RMSE (gamma) curves, possibly more than one:
+ if (show.gamma[1]) {
+ if (length(show.gamma) > 1)
+ gamma <- show.gamma
+ else gamma <- pretty(c(0, maxsd), n = ngamma)[-1] # [-1] to remove the first value of 0.0
+ if (gamma[length(gamma)] > maxsd)
+ gamma <- gamma[-length(gamma)]
+ labelpos <- seq(45, 70, length.out = length(gamma))
+ for (gindex in 1:length(gamma)) {
+ xcurve <- cos(seq(0, pi, by = 0.03)) * gamma[gindex] +
+ sd.r
+ endcurve <- which(xcurve < 0)
+ endcurve <- ifelse(length(endcurve), min(endcurve) -
+ 1, 105)
+ ycurve <- sin(seq(0, pi, by = 0.03)) * gamma[gindex]
+ maxcurve <- xcurve * xcurve + ycurve * ycurve
+ startcurve <- which(maxcurve > maxsd * maxsd)
+ startcurve <- ifelse(length(startcurve), max(startcurve) +
+ 1, 0)
+ lines(xcurve[startcurve:endcurve], ycurve[startcurve:endcurve],
+ col = gamma.col)
+ if (xcurve[labelpos[gindex]] > 0)
+ boxed.labels(xcurve[labelpos[gindex]], ycurve[labelpos[gindex]],
+ gamma[gindex], border = FALSE,cex=1, col=gamma.col)
+ }
+ }
+ xcurve <- cos(seq(0, pi/2, by = 0.01)) * maxsd
+ ycurve <- sin(seq(0, pi/2, by = 0.01)) * maxsd
+ lines(xcurve, ycurve) # external semicircle
+ bigtickangles <- acos(seq(0.1, 0.9, by = 0.1))
+ medtickangles <- acos(seq(0.05, 0.95, by = 0.1))
+ smltickangles <- acos(seq(0.91, 0.99, by = 0.01))
+ segments(cos(bigtickangles) * maxsd, sin(bigtickangles) *
+ maxsd, cos(bigtickangles) * 0.97 * maxsd, sin(bigtickangles) *
+ 0.97 * maxsd) # external major ticks
+ par(xpd = TRUE)
+ if (ref.sd) {
+ xcurve <- cos(seq(0, pi/2, by = 0.01)) * sd.r
+ ycurve <- sin(seq(0, pi/2, by = 0.01)) * sd.r
+ lines(xcurve, ycurve)
+ }
+ points(sd.r, 0, cex = pcex)
+ text(cos(c(bigtickangles, acos(c(0.95, 0.99)))) *
+ 1.05 * maxsd, sin(c(bigtickangles, acos(c(0.95,
+ 0.99)))) * 1.05 * maxsd, c(seq(0.1, 0.9, by = 0.1),
+ 0.95, 0.99), col="blue") # correlation numbers
+ text(maxsd * 0.8, maxsd * 0.8, "Correlation", srt = 315, col="blue")
+ segments(cos(medtickangles) * maxsd, sin(medtickangles) *
+ maxsd, cos(medtickangles) * 0.98 * maxsd, sin(medtickangles) *
+ 0.98 * maxsd)
+ segments(cos(smltickangles) * maxsd, sin(smltickangles) *
+ maxsd, cos(smltickangles) * 0.99 * maxsd, sin(smltickangles) *
+ 0.99 * maxsd)
+ }
+ else { # plot in case correlations can be negative or positive (pos.cor=FALSE):
+ x <- ref
+ y <- model
+ R <- cor(x, y, use = "pairwise.complete.obs")
+ E <- mean(x, na.rm = TRUE) - mean(y, na.rm = TRUE)
+ xprime <- x - mean(x, na.rm = TRUE)
+ yprime <- y - mean(y, na.rm = TRUE)
+ sumofsquares <- (xprime - yprime)^2
+ Eprime <- sqrt(sum(sumofsquares)/length(complete.cases(x)))
+ E2 <- E^2 + Eprime^2
+ if (add == FALSE) {
+ maxray <- 1.5 * max(sd.f, sd.r)
+ plot(c(-maxray, maxray), c(0, maxray), type = "n",
+ asp = 1, bty = "n", xaxt = "n", yaxt = "n",
+ xlab = xlab, ylab = ylab, main = main, cex = cex.axis)
+ discrete <- seq(180, 0, by = -1)
+ listepoints <- NULL
+ for (i in discrete) {
+ listepoints <- cbind(listepoints, maxray *
+ cos(i * pi/180), maxray * sin(i * pi/180))
+ }
+ listepoints <- matrix(listepoints, 2, length(listepoints)/2)
+ listepoints <- t(listepoints)
+ lines(listepoints[, 1], listepoints[, 2])
+ lines(c(-maxray, maxray), c(0, 0))
+ lines(c(0, 0), c(0, maxray))
+ for (i in grad.corr.lines) {
+ lines(c(0, maxray * i), c(0, maxray * sqrt(1 -
+ i^2)), lty = 3, col="blue")
+ lines(c(0, -maxray * i), c(0, maxray * sqrt(1 -
+ i^2)), lty = 3, col="blue")
+ }
+ for (i in grad.corr.full) {
+ text(1.05 * maxray * i, 1.05 * maxray * sqrt(1 -
+ i^2), i, cex = cex.axis)
+ text(-1.05 * maxray * i, 1.05 * maxray * sqrt(1 -
+ i^2), -i, cex = cex.axis)
+ }
+ seq.sd <- seq.int(0, 2 * maxray, by = (maxray/10))[-1]
+ for (i in seq.sd) {
+ xcircle <- sd.r + (cos(discrete * pi/180) *
+ i)
+ ycircle <- sin(discrete * pi/180) * i
+ for (j in 1:length(xcircle)) {
+ if ((xcircle[j]^2 + ycircle[j]^2) < (maxray^2)) {
+ points(xcircle[j], ycircle[j], col = "darkgreen",
+ pch = ".")
+ if (j == 10)
+ text(xcircle[j], ycircle[j], signif(i,
+ 2), cex = cex.axis, col = "darkgreen")
+ }
+ }
+ }
+ seq.sd <- seq.int(0, maxray, length.out = 5)
+ for (i in seq.sd) {
+ xcircle <- (cos(discrete * pi/180) * i)
+ ycircle <- sin(discrete * pi/180) * i
+ if (i)
+ lines(xcircle, ycircle, lty = 3, col = "blue")
+ text(min(xcircle), -0.03 * maxray, signif(i,
+ 2), cex = cex.axis, col = "blue")
+ text(max(xcircle), -0.03 * maxray, signif(i,
+ 2), cex = cex.axis, col = "blue")
+ }
+ text(0, -0.08 * maxray, "Standard Deviation",
+ cex = cex.axis, col = "blue")
+ text(0, -0.12 * maxray, "Centered RMSE",
+ cex = cex.axis, col = "darkgreen")
+ points(sd.r, 0, pch = 22, bg = "darkgreen", cex = 1.1)
+ text(0, 1.1 * maxray, "Correlation Coefficient",
+ cex = cex.axis)
+ }
+ S <- (2 * (1 + R))/(sd.f + (1/sd.f))^2
+ } # close if on 'pos.cor'
+ }
+
+ # the model point sits at polar coordinates (radius = sd.f, angle = acos(R)):
+ if(BIAS==TRUE){
+ points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = my.col, cex = pcex)
+ } else {
+ points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = col, cex = pcex)
+ }
+
+ # label the point; you can change the 'pos' argument to your liking:
+ if(length(my.text)>0) text(sd.f * R, sd.f * sin(acos(R)), labels=my.text, cex = text.cex, pos=3)
+ if(RMSE.label==TRUE) text(0.81, 0.14, "RMSE", srt = 45, cex=1, col=gamma.col)
+
+ text(1, 0.04, "ERA-Interim", srt = 0, cex=1, col="darkgray") # NB: hardcoded reference label
+
+ invisible(oldpar)
+}
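+
+# Illustrative sketch (not run): a synthetic reference series and a noisy model series
+# library(plotrix) # needed for boxed.labels when show.gamma=TRUE
+# set.seed(1)
+# obs <- rnorm(100)
+# mod <- obs + rnorm(100, sd = 0.3)
+# my.taylor(obs, mod, col = "red", my.text = "model A", RMSE.label = TRUE)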
+
+north.arrow <- function(loc,size,bearing=0,cols,cex=1,...)
+{
+ # checking arguments
+ if(missing(loc)) stop("loc is missing")
+ if(missing(size)) stop("size is missing")
+ # default colors are white and black
+ if(missing(cols)) cols <- rep(c("white","black"),8)
+ # calculating coordinates of polygons
+ radii <- rep(size/c(1,4,2,4),4)
+ x <- radii[(0:15)+1]*cos((0:15)*pi/8+bearing)+loc[1]
+ y <- radii[(0:15)+1]*sin((0:15)*pi/8+bearing)+loc[2]
+ # drawing polygons
+ for (i in 1:15) {
+ x1 <- c(x[i],x[i+1],loc[1])
+ y1 <- c(y[i],y[i+1],loc[2])
+ polygon(x1,y1,col=cols[i])
+ }
+ # drawing the last polygon
+ polygon(c(x[16],x[1],loc[1]),c(y[16],y[1],loc[2]),col=cols[16])
+ # drawing letters
+ b <- c("E","N","W","S")
+ for (i in 0:3) text((size+par("cxy")[1])*cos(bearing+i*pi/2)+loc[1],
+ (size+par("cxy")[2])*sin(bearing+i*pi/2)+loc[2],b[i+1],
+ cex=cex)
+}
+
+# takes an array and returns the same array, lowering all the elements higher than val.max to the value val.max (useful to adjust the legends of the plots):
+rescale.max <- function(my.array,val.max){
+ ss <- which(my.array > val.max)
+ my.array[ss] <- val.max - 0.000000001
+ return(my.array)
+}
+
+rescale.min <- function(my.array,val.min){ # like rescale.max, but the values smaller than val.min are raised to val.min
+ ss <- which(my.array < val.min)
+ my.array[ss] <- val.min + 0.0000000001
+ return(my.array)
+}
+
+rescale <- function(my.array,val.min,val.max){ # combines rescale.max and rescale.min
+ ss <- which(my.array > val.max)
+ my.array[ss] <- val.max - 0.0000000001
+ ss <- which(my.array < val.min)
+ my.array[ss] <- val.min + 0.0000000001 # the tiny offset is just to be able to draw a color with PlotEquiMap (otherwise it draws the color for NA)
+ return(my.array)
+}
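+
+# Illustrative (not run): clamp values to the legend range [0,10] before plotting
+# rescale(c(-5, 0.2, 12), val.min = 0, val.max = 10)  # -> c(0 + eps, 0.2, 10 - eps)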
+
+ColorBarV <- function(brks, cols = NULL, vert = TRUE, subsampleg = 1,
+ cex = 1, marg=NULL) {
+ # Creates a horizontal or vertical colorbar to introduce in multipanels.
+ #
+ # Args:
+ # brks: Levels.
+ # cols: List of colours, optional.
+ # vert: TRUE/FALSE for vertical/horizontal colorbar.
+ # subsampleg: Subsampling factor of the interval between ticks on the colorbar.
+ # Default: 1 = every level
+ # cex: Multiplicative factor to increase the ticks size, optional.
+ # marg: margins
+ #
+ # Returns:
+ # This function returns nothing
+ #
+ # History:
+ # 1.0 # 2012-04 (V. Guemas, vguemas@ic3.cat) # Original code
+ # 1.1 # 2014-11 (C. Prodhomme, chloe.prodhomme@ic3.cat) # add cex option
+ #
+ #
+ # Input arguments
+ # ~~~~~~~~~~~~~~~~~
+ #
+ if (is.null(cols) == TRUE) {
+ nlev <- length(brks) - 1
+ cols <- rainbow(nlev)
+ } else {
+ if (length(cols) != (length(brks) - 1)) {
+ stop("Inconsistent colour levels / list of colours")
+ }
+ }
+
+ #
+ # Plotting colorbar
+ # ~~~~~~~~~~~~~~~~~~~
+ #
+ if (vert) {
+ if (!is.null(marg)){
+ par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2)
+ }else{
+ par(mar = c(1, 1, 1, 1.5 *( 1 + cex)), mgp = c(1, 1, 0), las = 1, cex = 1.2)
+ }
+ image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, col = cols,
+ xlab = '', ylab = '')
+ box()
+ axis(4, at = seq(0.5, length(brks) - 0.5, subsampleg), tick = TRUE,
+ labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex)
+ } else {
+ if (!is.null(marg)){
+ par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2)
+ }else{
+ par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3,0.8*(cex-0.625))), 0),
+ las = 1, cex = 1.2)
+ }
+
+ image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols,
+ xlab = '', ylab = '')
+ box()
+ axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg),
+ labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex)
+ }
+}
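+
+# Illustrative (not run): a vertical colorbar with 5 colour levels
+# my.brks <- seq(0, 10, by = 2)
+# ColorBarV(my.brks, cols = heat.colors(5), vert = TRUE, cex = 0.8)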
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: remove the grid points above a certain value (argument 'level') [and below '-level' if two.sides=TRUE] that happen to fall in areas with few points above that value.
+# Useful to remove from a contour plot all the small spots of significant points that we don't want to contour.
+# To do so, just apply this function inside the 'contours' option of 'PlotEquiMap' to remove the significant points (they are set to the value of 0).
+# The argument 'size' determines the side of the square (in grid points) used to check whether there are enough grid points with values above 'level' near
+# the chosen point or not. Increasing it will increase the number of grid points deleted, leaving only the bigger spots of points above the chosen value.
+# I/O: a 2D lat/lon grid in geographic coordinates
+# Assumptions: none
+# Branch: general
+# Example:
+# data <- matrix(runif(48000,0,1)^2,300,160) + matrix(c(rep(0,20000),rep(0.6,3000),rep(0,25000)),300,160)
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"))
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=data, brks2=0.6)
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE)
+# PlotEquiMap_colored(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE, contours.col="blue", continents.col="gray40")
+
+grid2contour <- function(grid, level, two.sides=FALSE, size=10){
+ nrows <- dim(grid)[1]
+ ncols <- dim(grid)[2]
+ radius <- round(size/2)
+
+ grid.weighted <- matrix(NA, nrows, ncols)
+
+ grid.expanded <- rbind(cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]),cbind(grid,grid,grid),cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]))
+
+ if(two.sides==FALSE){
+ for(i in 1:nrows){
+ for(j in 1:ncols){
+ grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level)
+ }
+ }
+ } else {
+ for(i in 1:nrows){
+ for(j in 1:ncols){
+ grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level) + sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] < -level)
+ }
+ }
+ }
+
+ n.points.min <- (2*radius+1)^2*0.3 # 30% of the total points in the square
+ ss <- which(grid.weighted < n.points.min)
+ grid[ss] <- 0
+ return(grid)
+}
+
+######################################################################
+#
+# RELIABILITY DIAGRAM FOR A COLLECTION OF PROBABILITY FORECASTS #
+# Veronica: like the ReliabilityDiagram() function, but
+# modified to also include the hist.counts in the outputs
+#
+######################################################################
+
+ReliabilityDiagramHist <- function(probs, obs, bins=10, nboot=500,
+ plot=FALSE, plot.refin=TRUE, mc.cores=1,
+ cons.probs=c(0.025, 0.975))
+ {
+ #print("RD")
+ #
+ # Plot reliability diagram for a probability forecast
+ #
+ # Usage: ReliabilityDiagramHist(probs, obs, bins, nboot)
+ #
+ # Arguments:
+ #
+ # probs ... vector of length N, probs[k] has the predicted probability for
+ # the event obs[k]
+ # obs ... obs[k] = 1 if the event happened at instance k, obs[k] = 0
+ # otherwise
+ # bins ... either scalar: number of equidistant bins to discretize the
+ # forecast probabilities,
+ # or a vector: user-defined breakpoints of the bins; the `hist`
+ # function will produce errors if these are not valid
+ # nboot ... number of bootstrap resamples for estimating consistency bars;
+ # if nboot==0, no resampling is done and NAs are returned as
+ # consistency bars
+ # plot ... boolean; whether to plot the reliability diagram
+ # plot.refin ... boolean; whether to plot the small refinement histogram
+ # in the lower right corner
+ # cons.probs ... a 2-vector, lower and upper confidence limit
+ # mc.cores ... number of cores for resampling (if > 1, library `multicore`
+ # is required)
+ #
+ # Return value:
+ #
+ # a data frame of K+1 rows with the following columns:
+ #
+ # * p.avgs ... in-bin averages of the forecast probabilities
+ # * cond.probs ... observed conditional frequency of the event, given bin i
+ # * cbar.lo ... lower limit of consistency bar i, as specified by the user
+ # * cbar.hi ... upper limit of consistency bar i, as specified by the user
+ #
+ # Author:
+ #
+ # Stefan Siegert
+ # s.siegert@exeter.ac.uk
+ # December 2013
+ #
+ # Example:
+ #
+ # N <- 1000
+ # p <- rbeta(N, 1, 3)
+ # y <- rbinom(N, 1, p)
+ # rd <- ReliabilityDiagramHist(p, y, plot=TRUE)
+ # print(rd)
+ #
+ #
+ # change log:
+ #
+ # 2013/12/02
+ # * manual definition of bin-breaks
+ # * manual definition of consistency intervals
+ # * sanity checks
+ # * multicore option for resampling
+ #
+ # 2013/10/31:
+ # * return summary data as data frame
+ # * added options `plot` and `plot.refin`
+ #
+ # 2013/08/20:
+ # * points are plotted at in-bin-averages, not at bin centres
+ # * legend has been removed
+ # * consistency bars have been added, calculated by a resampling technique
+ # * see Broecker (2007) http://dx.doi.org/10.1175/WAF993.1 for details
+ # * the bars are pointwise 2.5% ... 97.5% intervals around the hypothesis of reliability
+ # * dependency on package "verification" was removed
+ #
+ # Author: Stefan Siegert
+ #
+ # based on previous version by Caio Coelho and the routine
+ # reliability.plot.default of the R-package `verification`
+ #
+
+
+ # sanity checks
+ if (inherits(probs, "data.frame")) {
+ probs <- c(as.matrix(probs))
+ }
+ if (inherits(obs, "data.frame")) {
+ obs <- c(as.matrix(obs))
+ }
+ stopifnot(length(probs) == length(obs))
+ stopifnot(nboot >= 0, mc.cores >= 0)
+ stopifnot(all(probs >= 0), all(probs <= 1), all(obs %in% c(0,1)))
+ stopifnot(length(cons.probs) == 2, all(cons.probs >= 0), all(cons.probs <= 1))
+ # optional use of multicore without warning message
+ warn <- getOption("warn")
+ options(warn=-1)
+ if(require(multicore, quietly=TRUE)) {
+ mclapply <- multicore::mclapply
+ } else {
+ mclapply <- function(..., mc.cores) lapply(...)
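+ # note: in this sequential fallback the named 'mc.cores' formal swallows that
+ # argument, so it is not forwarded to lapply() and resampling runs on one core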
+ } + options(warn=warn) + + # some definitions and corrections + n <- length(obs) + mc.cores <- floor(mc.cores) + nboot <- floor(nboot) + cons.probs <- sort(cons.probs) + + + ############################################# + # reliability analysis + ############################################# + # estimate refinement function + if (length(bins) == 1) { + nbins <- floor(bins) + brx <- seq(0, 1, length.out=nbins+1) + + c(-.1, rep(0, nbins-1), .1) + } else { + nbins <- length(bins) - 1 + bins <- sort(bins) + stopifnot(min(bins)<= 0 & max(bins) >= 1) + brx <- bins + } + h <- hist(probs, breaks=brx, plot=FALSE)$counts +#print(h) +#print(sum(h)) +#print(probs) +#print(sum(probs)) + p <- sum(probs) + #print(sum(probs)/sum(h)) + + # estimate calibration function + g <- hist(probs[obs==1], breaks=brx, plot=FALSE)$counts +#print(g) +#print(sum(g)) + obar.i <- g / h +#print(obar.i) + no_res <- sum(g)/sum(h) + #print(no_res) + obar.i[ is.nan(obar.i) ] <- NA + + # calculate in-bin averages + p.bins <- as.numeric(cut(probs, breaks=brx, include.lowest=TRUE)) + p.avgs <- sapply(seq(nbins), + function(ii) mean(probs[p.bins == ii], na.rm=TRUE)) + p.avgs[ is.nan(p.avgs) ] <- NA + +# +#print(p.avgs) +# vertline <- sum(p.avgs,na.rm = TRUE)/bins +#print(vertline) + + ############################################# + # consistency resampling (broecker and smith 2007) + ############################################# + if (nboot) { + resamp.mat <- matrix(nrow=0, ncol=nbins) + # the resampling function + sample.rel.diag <- function(dummy=0) { + p.hat <- sample(x=probs, size=n, replace=TRUE) + x.hat <- rbinom(n=n, size=1, prob=p.hat) + hh <- hist(p.hat, breaks=brx, plot=FALSE)$counts + gg <- hist(p.hat[x.hat==1], breaks=brx, plot=FALSE)$counts + return(gg / hh) + } + # multicore? 
+ if (mc.cores > 1) {
+ l <- mclapply(1:nboot, sample.rel.diag, mc.cores=mc.cores)
+ resamp.mat <- do.call(rbind, l)
+ } else {
+ l <- replicate(nboot, sample.rel.diag())
+ resamp.mat <- t(l)
+ }
+ cons.bars <- apply(resamp.mat, 2,
+ function(z) quantile(z, cons.probs, na.rm=TRUE))
+ } else {
+ cons.bars <- matrix(NA, ncol=nbins, nrow=2)
+ }
+
+
+ #############################################
+ # plot the reliability diagram
+ #############################################
+ if (plot) {
+ # reliability plot
+ old.par <- par(no.readonly = TRUE)
+ on.exit(par(old.par))
+ plot(NULL, xlim = c(0,1), ylim = c(0,1),
+ xlab="Forecast probability",
+ ylab="Observed relative frequency")
+ # consistency bars
+ for (i in 1:length(p.avgs)) {
+ lines(rep(p.avgs[i], 2), cons.bars[, i], col="#CCCCCC", lwd=6)
+ }
+ # reliability points and diagonal
+ points(p.avgs, obar.i, col = "black", pch = 1, lwd=2, type="b")
+ lines(c(0,1), c(0,1), lty=1)
+ if (plot.refin) {
+ # refinement histogram in lower corner
+ pp<- par("plt")
+ par("plt" = c(pp[2] - 0.2 , pp[2], pp[3], pp[3]+ 0.2) )
+ par(new = TRUE)
+ barplot(h, axes = FALSE, axisnames = FALSE)
+ axis(4)
+ box()
+ }
+ }
+
+ #############################################
+ # return data
+ #############################################
+ ret.df <- data.frame(p.avgs=p.avgs, cond.probs=obar.i,
+ cbar.lo=cons.bars[1,], cbar.hi=cons.bars[2,],hist.counts=h, obs.counts=g, for.prob=p)
+ return(ret.df)
+ }
+
+
+# to plot the reliability diagrams for both the upper and the lower tercile at the same time:
+ReliabilityDiagram2 <-function(rel_diag,nbins=10,consbars=F,tit=NULL,colLine=NULL,colBar=NULL,marHist=T,hist_ylim=NULL,Lg=NULL) {
+
+ # print("Plot")
+# rel_diag<-rd # output of ReliabilityDiagramHist()
+# nbins=10
+# consbars=T
+# colLine=col_line
+# colBar=col_bar
+# tit=tit1
+# marHist=T
+# hist_ylim=c(0,100)
+# x11(width=12,height=10)
+ # x11()
+ # PLOT OF THE RELIABILITY DIAGRAM
+ #
+ ######################################################################################
+ # rel_diag: a list with the reliability diagrams (outputs of ReliabilityDiagramHist) to draw in the same plot
+ # consbars: whether the consistency bars must be drawn or not
+ # nbins: number of equidistant points used to compute the reliability diagram (optional)
+ # tit: the title of the plot (optional)
+ # colLine, colBar: colors of the diagram lines and of the consistency bars, one per diagram (optional)
+ # hist_ylim: y-range of the refinement histograms (optional)
+ # Lg: legend labels, one per diagram
+ # marHist: whether the small refinement histograms are shown or not
+ #####################################################################################
+
+ # Some parameters are defined
+ nrd <- length(rel_diag) # nrd = 5: 4 models + mme
+ rg <- list()
+
+ # Check the dimensions of the rank histogram
+ for (i in 1:nrd){
+ if (dim(rel_diag[[i]])[1]!=nbins){
+ stop ('The bins of the reliability diagram must match nbins')
+ }
+ rg[[i]]<-range(rel_diag[[i]]$hist.counts) # check the range of the histograms
+ }
+
+ if (is.null(hist_ylim)){
+ rgH<-range(rg)
+#print(rgH)
+ }else{
+ rgH<-hist_ylim
+ }
+
+
+ ##########################################
+ # reliability plot
+ # par(mar=c(5,3,2,2)+0.1)
+ ##########################################
+
+ layout(matrix(c(rep(1,nrd),seq(2,(nrd+1))),nrd,2,byrow=F),widths=c(5,2))
+ par(oma=c(2.5,4,5,1))
+ #layout.show(a)
+
+ # The axes are defined
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ #x11(width=12,height=10)
+# old.par <- par(no.readonly = TRUE)
+#print(old.par)
+# on.exit(par(old.par))
+# par(mar=c(5,5,5,0))
+ old.par <- par(mar=c(5,5,5,0))
+ on.exit(par(old.par))
+
+ plot(NULL, xlim = c(0,1), ylim = c(0,1),axes=F, xlab='', ylab='')
+
+ axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=2.0)
+ title(xlab= "Forecast probability",line=3.9,cex.lab=2.0)
+
+ axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), las=2,cex.axis=2.0)
+ #axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), cex.axis=2.0)
+ box()
+ title(ylab= "Observed relative frequency", line=0.2,cex.lab=2.0,outer=T)
+ if(is.null(tit)==F){
+# title(tit,cex.main=4,outer=T,line=-1)
+# title(tit,cex.main=2.0,outer=T,line=-4)
+ title(tit,cex.main=2.0,outer=T,line=-3)
+ }
+
+ # Legend
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ yloc <- c(1.0, 0.95, 0.90, 0.85, 0.80)
+ legend(0.,yloc[1], legend=Lg[[1]], fill=colLine[[1]], bty="n", cex=1.2)
+ legend(0.,yloc[2], legend=Lg[[2]], fill=colLine[[2]], bty="n", cex=1.2)
+ legend(0.,yloc[3], legend=Lg[[3]], fill=colLine[[3]], bty="n", cex=1.2)
+ legend(0.,yloc[4], legend=Lg[[4]], fill=colLine[[4]], bty="n", cex=1.2)
+ legend(0.,yloc[5], legend=Lg[[5]], fill=colLine[[5]], bty="n", cex=1.2)
+# legend("topleft", "(x,y)", pch = 1, title = "topleft, inset = .05", inset = .05)
+
+ # No resolution and No skill lines
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ no_res <- sum(rel_diag[[1]]$obs.counts)/sum(rel_diag[[1]]$hist.counts)
+# vt_res <- sum(rel_diag[[1]]$for.prob)/sum(rel_diag[[1]]$hist.counts)
+#print(paste("no_res = ",no_res))
+ numb <- c(seq(0,1,by=0.1))
+#print(numb)
+ no_skill <- (numb+no_res)/2.
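+ # the no-skill line lies halfway between the no-resolution (climatology) line and the
+ # diagonal: bins plotted on the diagonal side of it contribute positively to the Brier skill score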
+#print(no_skill)
+
+# diagonal line
+ lines(c(0,1), c(0,1), lty=1)
+# no_resolution line
+ lines(c(0,1), c(no_res,no_res), col="gray", lty=3)
+ lines(c(1/3,1/3), c(0,1), col="gray", lty=3)
+# lines(c(vt_res,vt_res), c(0,1), col="gray", lty=3)
+# lines(c(no_res,no_res), c(0,1), col="gray", lty=3)
+# no_skill line
+ lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3)
+
+
+ # Consistency bars
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ HI <- matrix(NA, nrow=nrd, ncol=length(rel_diag[[1]]$hist.counts))
+
+ for (j in 1:nrd){ # nrd = 5: 4 models + mme
+ HI[j,] <- rel_diag[[j]]$hist.counts
+ if (consbars==T){
+ # the lower and the upper limit of consistency bar i are combined in one list
+ consBars<-list()
+ consBars[[j]]<-abind(InsertDim(rel_diag[[j]]$cbar.lo,1,1),InsertDim(rel_diag[[j]]$cbar.hi,1,1),along=1)
+
+ # plot consistency bars
+ for (i in 1:nbins){ # nbins = 10 bins
+# lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=3)
+ lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=2) # lwd: line width
+ }
+ }
+
+# see ?plot for the 'type' values: "p" points, "l" lines, "b" both, "c" empty points joined by lines, "o" overplotted, "s"/"S" stair steps, "h" histogram-like vertical lines, "n" nothing
+ points(rel_diag[[j]]$p.avgs, rel_diag[[j]]$cond.probs, type="b", pch=1 , col =colLine[[j]], cex=2.0 , lwd=3)
+
+ }
+
+
+ # Number of forecasts
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ if (marHist==TRUE){
+
+ for (i in 1:nrd){
+# par(mar=c(5,0,5,12))
+# par(mar=c(1.5,1.5,6,7)) # in case the number of sharpness diagrams is 3 or 4
+ par(mar=c(0.5,1.5,5,7)) # in case the number of sharpness diagrams is 5
+ barplot(HI[i,]/10000, beside=T,space=c(0,1.2),axes = F, axis.lty=F, axisnames = F, col = colLine[[i]], ylim=rgH/10000)
+# axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=1.5)
+ title(main = "# of forecasts (x10⁴)", font.main = 1.0, line=0.5)
+# grid(1,5,col='#525252')
+ axis(4,cex.axis=1.0)
+ box(bg='grey')
+ }
+ #pp<- par("plt")
+ #par("plt" = c(pp[2] - 0.14 , pp[2], pp[3], pp[3]+ 0.15) )
+ #par(new = TRUE)
+ }
+
+}
+
+
+#################################################################################################
+# Interpolation #
+#################################################################################################
+
+# function that returns the position of the nearest grid point to a point of coordinates (lat, lon);
+# the grid is represented by the two vectors lat.grid and lon.grid;
+# if there is more than one point at the same minimum distance, it returns only the position of the first one
+nearest <- function(lat,lon,lat.grid,lon.grid){
+ n.lat=length(lat.grid)
+ n.lon=length(lon.grid)
+
+ if(lon<0 && all(lon.grid>=0)) lon=360+lon # convert the negative longitude of the point to a positive one, if lon.grid only has positive values
+
+ #lat.grid2<-sort(lat.grid,decreasing=T) # sort latitudes because they must go from the highest value to the lowest
+ #lon.grid2<-sort(lon.grid) # sort longitudes because they must go from the lowest value to the highest
+
+ grid.dist.lat<-matrix((lat.grid-lat)^2, nrow=n.lat, ncol=n.lon+1, byrow=FALSE)
+ #grid.dist.lon<-matrix((lon.grid-lon)^2,nrow=n.lat,ncol=n.lon,byrow=TRUE)
+ grid.dist.lon<-matrix((c(lon.grid,lon.grid[1]+360)-lon)^2,nrow=n.lat,ncol=n.lon+1,byrow=TRUE) # also append the first lon point at the end of the lon values
+
+ grid.dist<-(grid.dist.lat+grid.dist.lon)^0.5 # matrix of distances from the point, in degrees
+# Interpolates the observed values in grid2 on grid1 with the bilinear method, but using great-circle distances (Haversine formula) and selecting only the 4 nearest grid points.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees.
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# It interpolates at least 2 grids at a time (e.g. the monthly grids of a model), so it can reuse the same distance matrix for all grids (the most computationally intensive part of the script)
+# and the same weights for each monthly grid; grid2 must have the format [layer,lat,lon], where 'layer' stands for day, month, year, season, etc.
+# Beware that the 2 rows of points closest to the north and south poles are still not well interpolated, because they are not yet associated with grid points on the other side of the pole.
+bilinear<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2){
+  R<-6371  # earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1)  # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  nearest.lat<-array(NA,c(n.lat1,2))      # latitudes of the two closest grid points
+  nearest.lat.pos<-array(NA,c(n.lat1,2))  # positions inside grid2 of the two closest points in the latitudinal sense
+  for(l in 1:n.lat1){
+    pos.first<-which.min(abs(lat2.list-lat1.list[l]))
+    lat2.list.temp<-lat2.list
+    lat2.list.temp[pos.first]=10000000000
+    pos.second<-which.min(abs(lat2.list.temp-lat1.list[l]))
+    nearest.lat[l,1]=lat2.list[pos.first]
+    nearest.lat[l,2]=lat2.list[pos.second]
+    nearest.lat.pos[l,1]=pos.first
+    nearest.lat.pos[l,2]=pos.second
+  }
+
+  # correct the longitude values if one of the two grids has only positive longitudes and the other also negative ones:
+  if(length(which(lon1.list<0))>0 && length(which(lon2.list<0))==0){lon2.list[which(lon2.list>180)]<-lon2.list[which(lon2.list>180)]-360}
+  if(length(which(lon1.list<0))==0 && length(which(lon2.list<0))>0){lon2.list[which(lon2.list<0)]<-lon2.list[which(lon2.list<0)]+360}
+
+  nearest.lon<-array(NA,c(n.lon1,2))      # longitudes of the two closest grid points
+  nearest.lon.pos<-array(NA,c(n.lon1,2))  # positions inside grid2 of the two closest points in the longitudinal sense
+  for(l in 1:n.lon1){
+    pos.first<-which.min(abs(lon2.list-lon1.list[l]))
+    lon2.list.temp<-lon2.list
+    lon2.list.temp[pos.first]=10000000000
+    pos.second<-which.min(abs(lon2.list.temp-lon1.list[l]))
+    # points of grid1 to the left of all points of grid2 are also associated with the rightmost points of grid2, because longitude wraps around:
+    if(lon1.list[l] < lon2.list[pos.first] && lon1.list[l] < lon2.list[pos.second]) pos.second<-n.lon2
+    # points of grid1 to the right of all points of grid2 are also associated with the leftmost points of grid2, because longitude wraps around:
+    if(lon1.list[l] > lon2.list[pos.first] && lon1.list[l] > lon2.list[pos.second]) pos.second<-1
+
+    nearest.lon[l,1]=lon2.list[pos.first]
+    nearest.lon[l,2]=lon2.list[pos.second]
+    nearest.lon.pos[l,1]=pos.first
+    nearest.lon.pos[l,2]=pos.second
+  }
+
+  lat1.list.rad<-deg2rad(lat1.list)
+  lon1.list.rad<-deg2rad(lon1.list)
+  nearest.lat.rad<-deg2rad(nearest.lat)
+  nearest.lon.rad<-deg2rad(nearest.lon)
+
+  pred<-array(NA,c(n.layers,n.lat1,n.lon1))
+  for(y in 1:n.lat1){
+    for(x in 1:n.lon1){
+
+      #lat.deg<-lat1.list[y]         # lat and lon of the selected grid1 point
+      #lon.deg<-lon1.list[x]
+      #lat1.deg<-nearest.lat[y,1]    # lat and lon of its 4 nearest points
+      #lon1.deg<-nearest.lon[x,1]
+      #lat2.deg<-nearest.lat[y,1]
+      #lon2.deg<-nearest.lon[x,2]
+      #lat3.deg<-nearest.lat[y,2]
+      #lon3.deg<-nearest.lon[x,1]
+      #lat4.deg<-nearest.lat[y,2]
+      #lon4.deg<-nearest.lon[x,2]
+      #latN.deg<-c(lat1.deg,lat2.deg,lat3.deg,lat4.deg)
+      #lonN.deg<-c(lon1.deg,lon2.deg,lon3.deg,lon4.deg)
+
+      # conversion to rad:
+      lat<-lat1.list.rad[y]         # lat and lon of the selected grid1 point
+      lon<-lon1.list.rad[x]
+      lat1<-nearest.lat.rad[y,1]    # lat and lon of its 4 nearest points
+      lon1<-nearest.lon.rad[x,1]
+      lat2<-nearest.lat.rad[y,1]
+      lon2<-nearest.lon.rad[x,2]
+      lat3<-nearest.lat.rad[y,2]
+      lon3<-nearest.lon.rad[x,1]
+      lat4<-nearest.lat.rad[y,2]
+      lon4<-nearest.lon.rad[x,2]
+      latN<-c(lat1,lat2,lat3,lat4)
+      lonN<-c(lon1,lon2,lon3,lon4)
+
+      distN<-sqrt((sin((lat-latN)/2))^2 + cos(lat)*cos(latN)*(sin((lon-lonN)/2))^2)
+      distN<-Rx2 * asin(pmin(distN,1))  # distances in km of the 4 nearest points
+
+      ss<-which(distN==0)
+      weights<-distN^2
+      weights[ss]<-0.000000000001  # to avoid +Inf in the denominator of the weight matrix
+      weights=1/weights
+      sum.weights<-sum(weights)
+
+      for(l in 1:n.layers){
+        value1<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,1]]  # grid2 values of its 4 nearest points
+        value2<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,2]]  # for layer l
+        value3<-grid2[l,nearest.lat.pos[y,2],nearest.lon.pos[x,1]]
+        value4<-grid2[l,nearest.lat.pos[y,2],nearest.lon.pos[x,2]]
+        valueN<-c(value1,value2,value3,value4)
+
+        values.weighted<-valueN * weights
+        sum.values.weighted<-sum(values.weighted)
+        pred[l,y,x]<-sum.values.weighted/sum.weights
+        if(sum.weights==0) pred[l,y,x]=NA  # if the 4 points are so far away that they carry no weight; although with this method it should never happen
+      }
+    }
+  }
+
+  # returning the layers as a list gave wrong values (suspected R bug), so the array is returned directly:
+  #interp<-list()
+  #for(l in 1:n.layers) interp[[l]]<-pred[l,,]
+  #return(interp)
+  return(pred)
+}
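+
+# A minimal usage sketch of bilinear() (random data, for illustration only; deg2rad() must be available):
+#
+# lat2 <- seq(88.75, -88.75, by = -2.5); lon2 <- seq(0, 357.5, by = 2.5)  # assumed source grid
+# grid2 <- array(rnorm(12*length(lat2)*length(lon2)), c(12, length(lat2), length(lon2)))  # 12 monthly layers
+# lat1 <- seq(70, -70, by = -1); lon1 <- seq(0, 359, by = 1)  # assumed target grid
+# interp <- bilinear(lat1, lon1, lat2, lon2, grid2)  # returns an array [layer, lat1, lon1]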
+# Interpolates the observed values in grid2 on grid1, assigning to each point of grid1 the value of the closest point of grid2.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees.
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# It interpolates at least 2 grids at a time (e.g. the monthly grids of a model), so it can reuse the same distance matrix for all grids (the most computationally intensive part of the script)
+# and the same weights for each monthly grid; grid2 must have the format [layer,lat,lon], where 'layer' stands for day, month, year, season, etc.
+# Beware that it does not work well at lon=0 and it doesn't compute the distance along the great circle!!!
+closer<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2){
+  R<-6371  # earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1)  # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  nearest.lat<-array(NA,c(n.lat1,2))      # latitude of the closest grid point
+  nearest.lat.pos<-array(NA,c(n.lat1,2))  # position inside grid2 of the closest point in the latitudinal sense
+  for(l in 1:n.lat1){
+    pos.first<-which.min(abs(lat2.list-lat1.list[l]))
+    lat2.list.temp<-lat2.list
+    nearest.lat[l,1]=lat2.list[pos.first]
+    nearest.lat.pos[l,1]=pos.first
+  }
+
+  # correct the longitude values if one of the two grids has only positive longitudes and the other also negative ones:
+  if(length(which(lon1.list<0))>0 && length(which(lon2.list<0))==0){lon2.list[which(lon2.list>180)]<-lon2.list[which(lon2.list>180)]-360}
+  if(length(which(lon1.list<0))==0 && length(which(lon2.list<0))>0){lon2.list[which(lon2.list<0)]<-lon2.list[which(lon2.list<0)]+360}
+
+  nearest.lon<-array(NA,c(n.lon1,2))      # longitude of the closest grid point
+  nearest.lon.pos<-array(NA,c(n.lon1,2))  # position inside grid2 of the closest point in the longitudinal sense
+  for(l in 1:n.lon1){
+    pos.first<-which.min(abs(lon2.list-lon1.list[l]))
+    lon2.list.temp<-lon2.list
+    nearest.lon[l,1]=lon2.list[pos.first]
+    nearest.lon.pos[l,1]=pos.first
+  }
+
+  #lat1.list.rad<-deg2rad(lat1.list)
+  #lon1.list.rad<-deg2rad(lon1.list)
+  #nearest.lat.rad<-deg2rad(nearest.lat)
+  #nearest.lon.rad<-deg2rad(nearest.lon)
+
+  pred<-array(NA,c(n.layers,n.lat1,n.lon1))
+  for(y in 1:n.lat1){
+    for(x in 1:n.lon1){
+      #lat<-lat1.list.rad[y]   # lat and lon of the selected grid1 point
+      #lon<-lon1.list.rad[x]
+
+      #latN<-nearest.lat.rad[y,1]  # lat and lon of its nearest point
+      #lonN<-nearest.lon.rad[x,1]
+
+      #distN<-sqrt((sin((lat-latN)/2))^2 + cos(lat)*cos(latN)*(sin((lon-lonN)/2))^2)
+      #distN<-Rx2 * asin(pmin(distN,1))  # distance in km of the nearest point
+
+      for(l in 1:n.layers){
+        pred[l,y,x]<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,1]]  # grid2 value of its nearest point for layer l
+      }
+    }
+  }
+
+  interp<-list()
+  for(l in 1:n.layers) interp[[l]]<-pred[l,,]
+  return(interp)
+}
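+
+# Usage is the same as in the bilinear() sketch above (random data, illustration only),
+# but note the different return type:
+#
+# interp <- closer(lat1, lon1, lat2, lon2, grid2)  # returns a list with one [lat1, lon1] matrix per layer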
+# Interpolates the observed values in grid2 on grid1 with the IDW method, using great-circle distances (Haversine formula) and selecting only the grid points closer than dmax km.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees.
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# It returns NA if, for a given point of grid1, there are no points of grid2 closer than dmax km that can be used to interpolate that point.
+# It interpolates at least 2 grids at a time (e.g. the monthly grids of a model), so it can reuse the same distance matrix for all grids (the most computationally intensive part of the IDW)
+# and the same weights for each monthly grid; grid2 must have the format [layer,lat,lon], where 'layer' stands for day, month, year, season, etc.
+# This function is fully vectorized, allowing the fastest possible interpolation of grids with any number of points. It uses up to ~8 GB of RAM, but this can
+# be decreased by setting a lower value of the variable max.memory defined below.
+multi.idw<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2,dmax){
+  max.memory<-125000000  # maximum number of elements in a matrix (each element is a double and occupies 8 bytes; the maximum size of a matrix in R is 2.1 GB, about 250000000 elements of type double)
+  R<-6371  # earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1)  # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  # longitude conversion:
+  #if(min(lon1)<0 && min(lon2)>=0) lon1=360+lon1  # convert the negative longitude of the first grid to a positive one, if the second grid has only positive longitudes
+  #if(min(lon2)<0 && min(lon1)>=0) lon2=360+lon2  # convert the negative longitude of the second grid to a positive one, if the first grid has only positive longitudes
+
+  #lat.grid1<-sort(lat.grid1,decreasing=T)  # sort latitudes because they must go from the highest value to the lowest one to simulate a spatial grid
+  #lon1<-sort(lon1)  # sort longitudes because they must go from the lowest value to the highest one to simulate a spatial grid
+
+  #lat.grid2<-sort(lat.grid2,decreasing=T)  # sort latitudes because they must go from the highest value to the lowest one to simulate a spatial grid
+  #lon2<-sort(lon2)  # sort longitudes because they must go from the lowest value to the highest one to simulate a spatial grid
+
+  n.max1<-floor(max.memory/n.points2)  # maximum number of points of grid1 that can be used in a matrix of ~1 GB (1 point = 1 double = 8 bytes)
+  if(n.points1<=n.max1) {
+    n.int<-1
+    n.points1.int<-n.points1
+    n.points1.last.int<-n.points1
+    print("grid fits into memory")
+  } else {  # in this case, cut grid1 horizontally into smaller grids to work with matrices of ~1 GB
+    n.int<-floor(n.points1/n.max1)+1
+    n.points1.int<-n.max1
+    n.points1.last.int<-n.points1 %% n.max1  # number of points of the last interval
+    print(paste("grid will be split in",n.int,"subgrids"))
+  }
+
+  lat1<-rep(lat1.list,each=n.lon1)
+  lon1<-rep(lon1.list,n.lat1)
+  lat1.rad<-deg2rad(lat1)
+  lon1.rad<-deg2rad(lon1)
+  cos.lat1<-cos(lat1.rad)
+  vert1<-cbind(lat=lat1,lon=lon1,lat.deg=lat1.rad,lon.deg=lon1.rad,cos.lat=cos.lat1)  # list of lat and lon of the points of grid1, with the radian values and the cosine too
+
+  lat2<-rep(lat2.list,each=n.lon2)
+  lon2<-rep(lon2.list,n.lat2)
+  lat2.rad<-deg2rad(lat2)
+  lon2.rad<-deg2rad(lon2)
+  cos.lat2<-cos(lat2.rad)
+  vert2<-cbind(lat=lat2,lon=lon2,lat.deg=lat2.rad,lon.deg=lon2.rad,cos.lat=cos.lat2)  # list of lat and lon of the points of grid2, with the radian values and the cosine too
+
+  pred<-matrix(NA,nrow=n.points1,ncol=n.layers)  # matrix with the interpolated values for each layer
+
+  for(i in 1:n.int){
+    imax<-i*n.max1
+    if(i==n.int) imax=n.points1
+    imin<-1+(i-1)*n.max1
+    if(i==n.int && n.int>1) n.points1.int=n.points1.last.int
+
+    lat1.rad.int<-lat1.rad[imin:imax]
+    lat2.rad.int<-lat2.rad
+    lon1.rad.int<-lon1.rad[imin:imax]
+    lon2.rad.int<-lon2.rad
+    cos.lat1.int<-cos.lat1[imin:imax]
+    cos.lat2.int<-cos.lat2
+
+    print("Calculating distance matrix...")
+
+    vert.cos.lat1<-matrix(cos.lat1.int,nrow=n.points1.int,ncol=n.points2)
+    vert.cos.lat2<-matrix(cos.lat2.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.cos.lat = vert.cos.lat1 * vert.cos.lat2
+    rm(vert.cos.lat1,vert.cos.lat2);gc()
+
+    vert.lon1<-matrix(lon1.rad.int,nrow=n.points1.int,ncol=n.points2)
+    vert.lon2<-matrix(lon2.rad.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.lon <- (vert.lon2 - vert.lon1)/2
+    rm(vert.lon1,vert.lon2);gc()
+
+    vert.lon.squared<-vert.lon^2
+    rm(vert.lon);gc()
+
+    vert.mult<-vert.cos.lat * vert.lon.squared
+    rm(vert.cos.lat,vert.lon.squared);gc()
+
+    vert.lat1<-matrix(lat1.rad.int,nrow=n.points1.int,ncol=n.points2)
+    vert.lat2<-matrix(lat2.rad.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.lat <- (vert.lat2 - vert.lat1)/2
+    rm(vert.lat1,vert.lat2);gc()
+
+    vert.lat.squared <- vert.lat^2
+    rm(vert.lat);gc()
+
+    vert.latlon <- vert.lat.squared + vert.mult
+    rm(vert.lat.squared,vert.mult);gc()
+
+    vert.root<-sqrt(vert.latlon)
+    rm(vert.latlon);gc()
+
+    mm<-which(vert.root > 1)
+    if(length(mm)>0) vert.root[mm]<-1
+    rm(mm);gc()
+
+    vert.dist <- Rx2 * asin(vert.root)  # matrix of distances in km
+    rm(vert.root);gc()
+
+    ss<-which(vert.dist==0)
+    weights<-vert.dist^2
+    weights[ss]<-0.000000000001  # to avoid +Inf in the denominator of the weight matrix
+    weights=1/weights
+    rm(ss);gc()
+
+    nn<-which(vert.dist > dmax)
+    weights[nn]<-0  # set to 0 the weights of the grid2 points that are not used to compute the IDW because they are too far away
+    sum.weights<-rowSums(weights)
+    zz<-which(sum.weights==0)
+    rm(vert.dist,nn);gc()
+
+    for(l in 1:n.layers){
+      # put the grid2 values in each row of a matrix following the lat/lon list of grid2 points, but horizontally:
+      horiz2<-as.vector(t(grid2[l,,]))
+      horiz2.rep<-rep(horiz2,n.points1.int)
+
+      vert2.values.int<-matrix(horiz2.rep,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+      values.weighted<-vert2.values.int * weights
+      sum.values.weighted<-rowSums(values.weighted)
+
+      pred[imin:imax,l]<-sum.values.weighted/sum.weights
+      if(length(zz)>0) pred[imin-1+zz,l]<-NA  # points with no grid2 neighbour within dmax get NA
+      rm(values.weighted,sum.values.weighted,vert2.values.int,horiz2,horiz2.rep); #gc()
+
+      print(paste("subgrid:",i,"/",n.int," layer:",l,"/",n.layers))
+    }
+  }
+
+  rm(zz,weights,sum.weights);gc()
+
+  interp<-list()
+  for(l in 1:n.layers) interp[[l]]<-matrix(pred[,l],nrow=n.lat1,ncol=n.lon1,byrow=TRUE)
+
+  return(interp)
+}
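+
+# A minimal usage sketch of multi.idw(), reusing the grids from the bilinear() sketch above
+# (random data, illustration only; the 300 km radius is an arbitrary assumption):
+#
+# interp <- multi.idw(lat1, lon1, lat2, lon2, grid2, dmax = 300)  # use only grid2 points closer than 300 km
+# # interp is a list with one [lat1, lon1] matrix per layer; points with no grid2 neighbour within dmax are NA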
+################################################################################
+#                            Regression functions                              #
+################################################################################
+
+# Modified version of the lm.fit() base R function, to increase its speed by removing some unnecessary outputs.
+# From R version 2.15 onwards it cannot be used anymore; use lm.fit.fast instead.
+# Beware that with lm.fit you must also pass an initial column of ones to represent the intercept term!!!
+lm.fit.fast.old<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+    z <- .Fortran("dqrls", qr = x, n = n, p = p, y = y, ny = ny,
+        tol = as.double(tol), coefficients = mat.or.vec(p, ny),
+        residuals = y, effects = y, rank = integer(1L), pivot = 1L:p,
+        qraux = double(p), work = double(2 * p), PACKAGE = "base")
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
Using 'qr'", + method), domain = NA) + if (length(list(...))) + warning("extra arguments ", paste(names(list(...)), sep = ", "), + " are just disregarded.") + storage.mode(x) <- "double" + storage.mode(y) <- "double" + z <- .Fortran("dqrls", qr = x, n = n, p = p, y = y, ny = ny, + tol = as.double(tol), coefficients = mat.or.vec(p, ny), + residuals = y, effects = y, rank = integer(1L), pivot = 1L:p, + qraux = double(p), work = double(2 * p), PACKAGE = "base") + if (!singular.ok && z$rank < p) + stop("singular fit encountered") + coef <- z$coefficients + pivot <- z$pivot + r1 <- seq_len(z$rank) + dn <- colnames(x) + if (is.null(dn)) + dn <- paste("x", 1L:p, sep = "") + r2 <- if (z$rank < p) + (z$rank + 1L):p + else integer(0L) + if (is.matrix(y)) { + coef[r2, ] <- NA + coef[pivot, ] <- coef + dimnames(coef) <- list(dn, colnames(y)) + } + else { + coef[r2] <- NA + coef[pivot] <- coef + names(coef) <- dn + } + z$coefficients <- coef + r1 <- y - z$residuals + if (!is.null(offset)) + r1 <- r1 + offset + c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank)) +} + +# Modified version of the lm.fit() base R function, to increase its speed by removing some unnecessary outputs (such as the QR decomposition) +# remember that the first column of the x matrix must be a column of 1, to represent the constant term! +# Dopo la versione 2.15 di R le funzioni .Fortran() sono proibite, bisogna sostituirle con .Call, +# percio' invece di lm.fit.fast.old bisogna chiamare lm.fit.fast: +lm.fit.fast<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...) +{ + if (is.null(n <- nrow(x))) + stop("'x' must be a matrix") + if (n == 0L) + stop("0 (non-NA) cases") + p <- ncol(x) + if (p == 0L) { + return(list(coefficients = numeric(0L), residuals = y, + fitted.values = 0 * y, rank = 0, df.residual = length(y))) + } + ny <- NCOL(y) + if (is.matrix(y) && ny == 1) + y <- drop(y) + if (!is.null(offset)) + y <- y - offset + if (NROW(y) != n) + stop("incompatible dimensions") + if (method != "qr") + warning(gettextf("method = '%s' is not supported. Using 'qr'", + method), domain = NA) + if (length(list(...))) + warning("extra arguments ", paste(names(list(...)), sep = ", "), + " are just disregarded.") + storage.mode(x) <- "double" + storage.mode(y) <- "double" + + z <- .Call(stats:::C_Cdqrls, x, y, tol, TRUE) + + if (!singular.ok && z$rank < p) + stop("singular fit encountered") + coef <- z$coefficients + pivot <- z$pivot + r1 <- seq_len(z$rank) + dn <- colnames(x) + if (is.null(dn)) + dn <- paste("x", 1L:p, sep = "") + r2 <- if (z$rank < p) + (z$rank + 1L):p + else integer(0L) + if (is.matrix(y)) { + coef[r2, ] <- NA + coef[pivot, ] <- coef + dimnames(coef) <- list(dn, colnames(y)) + } + else { + coef[r2] <- NA + coef[pivot] <- coef + names(coef) <- dn + } + z$coefficients <- coef + r1 <- y - z$residuals + if (!is.null(offset)) + r1 <- r1 + offset + c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank)) +} + +# Function to plot the bar chart of the anomalies of a time series of frequencies (by default in % but can be changed with the freq.max option), +# using red colors for anomalies above the climatological value of the frequencies. It also deals with eventual NA in the time series (doesn't plot the correspondent bar) +# and blue color for anomalies below the climatological value (the frequency mean of the variable over the study period). 
+# Function to plot the bar chart of the anomalies of a time series of frequencies (by default in %, but this can be changed with the freq.max option),
+# using red for anomalies above the climatological value of the frequencies (the frequency mean over the study period)
+# and blue for anomalies below it. It also handles any NA in the time series (the corresponding bar is not drawn),
+# and adds the linear fit if it is found to be significant (with the Mann-Kendall test, which requires the Kendall package).
+barplot.freq <- function(time.serie, year.start, year.end, p.value = 0.05, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), ...)
+{
+  x <- time.serie
+  n.years <- length(x)
+  years.serie <- year.start:year.end
+  # m is the value used to separate positive from negative anomalies, i.e. the climatology over the whole period:
+  m <- mean(x, na.rm=TRUE)
+  bar3 <- pmax(x - m, 0)
+  bar2 <- pmax(m - x, 0)
+  bar1 <- m - bar2
+  bar1[is.na(bar1)] <- m  # in case there are NA
+  bar2[is.na(bar2)] <- 0  # in case there are NA
+  bar3[is.na(bar3)] <- 0  # in case there are NA
+
+  bar.matrix <- matrix(c(bar1,bar2,bar3), 3, n.years, byrow=TRUE)
+  barplot(bar.matrix, col = c("white","blue","red"), border = NA, names.arg = years.serie, ylim = c(0,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp)
+
+  abline(0,0, col="black")  # add a black x-axis line
+
+  # add the mean frequency of the simulated ensemble mean time series:
+  # text(length(years.serie)/3, freq.max*(29/30),labels=bquote(bar(nu) == .(paste0(round(mean(x),1),"%"))),cex=3)
+
+  #z <- lm(x ~ years)
+  kendall_pvalue <- MannKendall(x)$sl[1]
+
+  # add a line with the linear trend only if it is significant:
+  #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+  if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+}
+
+## Alternative but without bars:
+## library(lattice)  # for xyplot
+## library(grid)     # for adding a different color for negative values in xyplot
+## x<-1:100
+## y<-4+rnorm(100)
+## x <- zoo(4+rnorm(100))
+## xyplot(x, grid=TRUE, panel = function(x, y, ...){
+##   panel.xyplot(x, y, col="blue", ...)
+##   #panel.abline(h=0, col="black")
+##   panel.abline(h=4, col="gray")
+##   grid.clip(y=unit(4,"native"),just=c("bottom"))
+##   panel.xyplot(x, y, col="red", ...)
+## })
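+
+# A minimal usage sketch of barplot.freq() (random frequencies, illustration only):
+#
+# library(Kendall)                     # for MannKendall()
+# freq <- runif(35, 0.2, 0.4)          # fake yearly frequencies for 1980-2014
+# barplot.freq(freq, 1980, 2014, freq.max = 0.5, title = "WT frequency")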
+# as barplot.freq, but for forecasts:
+barplot.freq.sim <- function(time.serie, time.serie.max, time.serie.min, time.serie.obs, year.start, year.end, p.value = 0.05, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), col.bar = c("white","gray70"), col.line="gray50", cex.mean = 1, cex.obs = 1, cex.r = 1.5, ...)
+{
+  x <- time.serie
+  y <- time.serie.max
+  z <- time.serie.min
+  o <- time.serie.obs
+
+  n.years <- length(x)
+  years.serie <- year.start:year.end
+  # m is the value used to separate positive from negative anomalies, i.e. the climatology over the whole period:
+  m <- mean(x, na.rm=TRUE)
+
+  bar1 <- z
+  bar2 <- y
+  bar1[is.na(bar1)] <- m  # in case there are NA
+  bar2[is.na(bar2)] <- 0  # in case there are NA
+
+  bar.matrix <- matrix(c(bar1,bar2), 2, length(years.serie), byrow=TRUE)
+  my.bar <- barplot(bar.matrix, col = col.bar, border = NA, names.arg = years.serie, ylim = c(0,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp)
+
+  abline(m,0, col=col.line)  # add a horizontal line at the climatological value
+
+  # add points with the ensemble mean:
+  col.sim <- rep("red",year.end-year.start+1)
+  col.sim[which(x < m)] <- "blue"
+  points(my.bar, x, type="p", pch=20, xlab="", ylab="", col=col.sim, cex=cex.mean)
+
+  # add crosses with the observed values:
+  col.obs <- rep("red",year.end-year.start+1)
+  col.obs[which(o < m)] <- "blue"
+  points(my.bar, o, type="p", pch=4, xlab="", ylab="", col=col.obs, cex=cex.obs, lwd=5)
+
+  # add the correlation between the observed time series and the ensemble mean time series:
+  corr <- round(cor(x,o, use="complete.obs"),2)
+  text(length(years.serie), freq.max*(19/20), labels=paste0("r= ",corr), cex=cex.r)
+  #text(c(length(years.serie-5),length(years.serie)),rep(freq.max*(9/10),2), labels=c(round(o,2),corr))
+
+  #z <- lm(x ~ years)
+  kendall_pvalue <- MannKendall(x)$sl[1]
+
+  # add a line with the linear trend only if it is significant:
+  #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+  if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+}
+# as barplot.freq, but for forecasts:
+barplot.freq.sim2 <- function(time.serie, time.serie.max, time.serie.min, time.serie.obs, year.start, year.end, p.value = 0.05, freq.min=0, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), col.bar = c("white","gray85","#4393c3","#d6604d","gray80"), col.line="gray50", cex.mean = 1, cex.obs = 1, cex.r = 1.5, ...)
+{
+  x <- time.serie
+  y <- time.serie.max
+  z <- time.serie.min
+  obs <- time.serie.obs
+
+  n.years <- length(x)
+  years.serie <- year.start:year.end
+  # m is the value used to separate positive from negative anomalies, i.e. the climatology over the whole period:
+  m <- mean(x, na.rm=TRUE)
+
+  bar1 <- z                          # white bar
+  bar2 <- pmin(x, m) - bar1          # gray bar
+  bar3 <- pmax(m - x, 0)             # blue bar
+  bar4 <- pmax(x - m, 0)             # red bar
+  bar5 <- y - (bar1+bar2+bar3+bar4)  # gray bar
+
+  bar1[is.na(bar1)] <- m  # in case there are NA
+  bar2[is.na(bar2)] <- 0  # in case there are NA
+  bar3[is.na(bar3)] <- 0  # in case there are NA
+  bar4[is.na(bar4)] <- 0  # in case there are NA
+  bar5[is.na(bar5)] <- 0  # in case there are NA
+
+  #bar3 <- pmax(x - m, 0)
+  #bar2 <- pmax(m - x, 0)
+  #bar1 <- m - bar2
+
+  bar.matrix <- matrix(c(bar1,bar2,bar3,bar4,bar5), 5, length(years.serie), byrow=TRUE)
+  my.bar <- barplot(bar.matrix, col = col.bar, border = NA, names.arg = years.serie, ylim = c(freq.min,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp)
+
+  abline(m,0, col=col.line)  # add a horizontal line at the climatological value
+
+  # add points with the ensemble mean:
+  #col.sim <- rep("red",year.end-year.start+1)
+  #col.sim[which(x < m)] <- "blue"
+  #points(my.bar, x, type="p", pch=20, xlab="", ylab="", col=col.sim, cex=cex.mean)
+
+  # add crosses with the observed values:
+  col.obs <- rep("#67001f",year.end-year.start+1)
+  col.obs[which(obs < m)] <- "#053061"
+  points(my.bar, obs, type="p", pch=4, xlab="", ylab="", col=col.obs, cex=cex.obs, lwd=5)
+
+  # add the correlation between the observed time series and the ensemble mean time series:
+  #corr <- round(cor(x,obs, use="complete.obs"),2)
+  #text(length(years.serie), freq.max*(29/30), labels=paste0("r= ",corr), cex=cex.r)
+
+  # add the mean frequency of the simulated ensemble mean time series:
+  #text(length(years.serie)/3, freq.max*(29/30),labels=bquote(bar(nu) == .(paste0(round(mean(x),1),"%"))),cex=cex.r)
+
+  #z <- lm(x ~ years)
+  kendall_pvalue <- MannKendall(x)$sl[1]
+
+  # add a line with the linear trend only if it is significant:
+  #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+  if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+}
+
+
+# As ColorBar, but the user MUST supply their own ticks and tick labels
+# (it is used to have better control over the values shown).
+# The option draw.ticks is used to remove the tick lines;
+# the option label.dist is used to change the distance from the legend to the labels.
+# Example:
+#
+# ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+#
+ColorBar2 <- function (brks, cols = NULL, vert = TRUE, cex = 1, draw.ticks = TRUE, label.dist = 1, my.ticks, my.labels)
+{
+  if (is.null(cols) == TRUE) {
+    nlev <- length(brks) - 1
+    cols <- rainbow(nlev)
+  }
+  else {
+    if (length(cols) != (length(brks) - 1)) {
+      stop("Inconsistent colour levels / list of colours")
+    }
+  }
+  if (vert) {
+    par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1, 0), las = 1, cex = 1.2)
+    image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE,
+          col = cols, xlab = "", ylab = "")
+    box()
+    axis(4, at = my.ticks, tick = draw.ticks, labels = my.labels, cex.axis = cex, mgp=c(3,label.dist,0))
+  }
+  else {
+    par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3, 0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2)
+    image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE,
+          col = cols, xlab = "", ylab = "")
+    box()
+    axis(1, at = my.ticks, tick = draw.ticks, labels = my.labels, cex.axis = cex, mgp=c(3,label.dist,0))
+  }
+}
cols, xlab = "", ylab = "") + box() + axis(1, at = my.ticks, tick = draw.ticks, labels = my.labels, cex.axis = cex, mgp=c(3,label.dist,0)) + } +} + +# ColorBar3: like ColorBar, but the user can specify a subset of the predefined labels to be shown in the legend (more powerful than the resample option) +ColorBar3 <- function (brks, cols = NULL, vert = TRUE, cex = 1, subset = 1:length(brks)) +{ + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- rainbow(nlev) + } + else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + if (vert) { + par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1, + 0), las = 1, cex = 1.2) + image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "") + box() + axis(4, at = seq(0.5, length(brks) - 0.5, 1)[subset], + tick = TRUE, labels = brks[seq(1, length(brks), 1)][subset], + cex.axis = cex) + } + else { + par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3, + 0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2) + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "") + box() + axis(1, at = seq(0.5, length(brks) - 0.5, 1)[subset], + labels = brks[seq(1, length(brks), 1)][subset], + cex.axis = cex) + } +} + + +# Use the function below by handing it a matrix of numbers. It will plot the matrix with a color scale based on the highest and lowest values in the matrix. +# usage: myImagePlot(m) where m is a matrix of numbers +# optional arguments: myImagePlot(m, xlabels, ylabels, zlim, title=c("my title")) +# xLabels and yLabels are vectors of strings to label the rows and columns. +# zlim is a vector containing a low and high value to use for the color scale + +myImagePlot <- function(x, ...){ + min <- min(x) + max <- max(x) + yLabels <- rownames(x) + xLabels <- colnames(x) + title <-c() + # check for additional function arguments + if( length(list(...)) ){ + Lst <- list(...) 
+################################################################################
+#                            Big data functions                                #
+################################################################################
+
+################################# split.array #################################
+
+# Function to split an array into smaller arrays, selecting one dimension of the array as the one used to split it.
+# The function only returns the intervals of each subarray, not the values of the subarrays.
+# The subarrays are then used inside a for loop in the main script to do all the required analysis, instead of applying it to the whole array.
+#
+# The array can be, for instance, a hindcast array with dimensions c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon)
+#
+# Example:
+#
+# hindcasts <- array(rnorm(4*100*4*256*512), c(4,100,4,256,512))
+# block <- split.array.old(hindcasts, 5)  # split on longitude
+#
+# for(subArray in 1:block$n.sub){         # loop on each smaller subarray
+#   subLon=block$my.interv[[subArray]]    # longitude interval corresponding to the subarray data
+#   nlon<-length(subLon)                  # length of the actual subarray
+#
+#   hindcast.sub <- hindcasts[,,,,subLon] # subarray data
+#   # now insert below any calculation you need to do on the hindcast.sub array
+#
+# } # close for on subArray
+
+split.array.old <- function (array, along=tail(dim(array),1), max.n.el=10000000){
+  array.dims <- dim(array)              # i.e: [4,100,4,320,640]
+  n.sub <- prod(array.dims)/max.n.el    # number of sub-arrays in which to split the hindcast and rean.data, i.e: 4*100*4*320*640/10000000 = 32.768
+  n.sub <- ceiling(n.sub)               # round n.sub to the nearest higher integer, i.e: 33
+  n.split <- array.dims[along]          # number of elements along the splitting dimension, i.e: 640 if along=5
+  sub.size <- floor(n.split/n.sub)      # number of elements in the split dimension (i.e: lon) of each subarray, rounded to the lower integer, i.e: 640/33=19.39=19
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  n.sub <- n.split %/% sub.size         # take only the integer part of the ratio. It is equal to floor(n.split/sub.size), i.e: 640/19=33.68=33
+  add.last <- n.split %% n.sub          # number of additional elements of the last subarray (if %% > 0), i.e: 640 %% 33 = 13
+
+  my.interv<-list()
+  for(s in 1:n.sub){
+    if(s==n.sub) {mod.last <- add.last} else {mod.last <- 0}
+    my.interv[[s]] <- (1 + sub.size*(s-1)):((sub.size*s) + mod.last)
+  }
+
+  return(list(array.dims=array.dims, along=along, n.split=n.split, n.sub=n.sub, sub.size=sub.size, add.last=add.last, sub.size.last=sub.size + mod.last, my.interv=my.interv))
+}
+# Function to split an array into smaller arrays (chunks).
+#
+# In the argument 'dimensions', you specify the dimensions of the array to split [i.e: c(4,100,4,256,512) for a hindcast array].
+# In the argument 'along', you specify which dimension you want to use to split the array. By default, it is the last dimension of the array.
+# In the argument 'max.n.el', you set the size of a chunk. By default, split.array creates chunks of about 10,000,000 elements. For arrays of type numeric (double),
+# this is equivalent to 80 MB, because each double occupies 8 bytes.
+# In the argument 'chunks', you specify exactly how many chunks you want to be created; it overrides the option 'max.n.el'. Useful if you already know
+# (more or less) how many chunks you need and want to run each chunk on a different core at the same time, so you can't exceed the maximum number of cores available on your machine.
+# In the argument 'smallest', you specify if you want to run the script with the maximum possible number of chunks (smallest=TRUE),
+# so that the number of chunks is equal to the number of possible values of the 'along' dimension and the size of each chunk is exactly 1.
+
+split.array <- function (dimensions, along=tail(dimensions,1), max.n.el=10000000, chunks=NULL, smallest=FALSE){
+  if(along <= 0 || along > length(dimensions)) stop("Choose a number for the 'along' argument inside the number of available dimensions!")
+
+  array.dims <- dimensions              # i.e: [4,100,4,320,640]
+  n.split <- array.dims[along]          # number of elements along the splitting dimension, i.e: 640 if along=5
+
+  n.chunk <- prod(array.dims)/max.n.el  # first estimate of the number of chunks (sub-arrays) into which the array will be split, i.e: 4*100*4*320*640/10000000 = 32.768
+  n.chunk <- ceiling(n.chunk)           # round n.chunk to the nearest higher integer, i.e: 33
+  chunk.size <- floor(n.split/n.chunk)  # number of elements in the split dimension (i.e: lon) of each chunk, rounded to the lower integer, i.e: 640/33=19.39=19
+  if(chunk.size <= 1) stop("Chunks too small. Try increasing 10 times the value of max.n.el")
+
+  n.chunk <- n.split %/% chunk.size     # take only the integer part of the ratio. It is equal to floor(n.split/chunk.size), i.e: 640/19=33.68=33
+  if(!is.null(chunks)) {n.chunk <- chunks; chunk.size <- floor(n.split/n.chunk)}
+
+  add.last <- n.split %% n.chunk        # number of additional elements of the last chunk (if %% > 0), i.e: 640 %% 33 = 13
+
+  if(smallest == TRUE){  # in this case, we want each chunk to be exactly 1 row or column wide:
+    n.chunk <- array.dims[along]
+    n.split <- n.chunk
+    chunk.size <- 1
+    add.last <- 0
+  }
+
+  # list with the position of each chunk c inside the chosen dimension of the array:
+  int<-list()
+  for(c in 1:n.chunk){
+    if(c == n.chunk) {mod.last <- add.last} else {mod.last <- 0}
+    int[[c]] <- (1 + chunk.size*(c-1)):((chunk.size*c) + mod.last)
+  }
+
+  chunk.size.last <- chunk.size + mod.last  # number of elements of the last chunk
+
+  # list with the number of elements of each chunk c:
+  n.int<-list()
+  for(c in 1:n.chunk) n.int[[c]] <- length(int[[c]])
+
+  return(list(n.int=n.int, int=int, array.dims=array.dims, along=along, max.n.el=max.n.el, n.split=n.split,
+              n.chunk=n.chunk, chunk.size=chunk.size, add.last=add.last, chunk.size.last=chunk.size.last))
+}
+# Example:
+#
+# hindcasts <- array(rnorm(4*100*4*256*512), c(4,100,4,256,512))
+#
+# chunk <- split.array(dim(hindcasts), 5)  # split on longitude
+#
+# for(subArray in 1:chunk$n.chunk){        # loop on each chunk
+#   subLon=chunk$int[[subArray]]           # longitude interval corresponding to the chunk data
+#   nlon<-length(subLon)                   # length of the actual chunk
+#
+#   hindcast.sub <- hindcasts[,,,,subLon]  # chunk data
+#   # now insert below any calculation you need to do on the hindcast.sub array
+#
+# } # close for on subArray
+
+
+#################### veriApplyBig ############################################
+
+
+# A wrapper of veriApply() to be able to work even with hindcast arrays that exceed the memory limits of the workstation,
+# and to efficiently use the option 'parallel=TRUE' also for large data arrays, with no memory limits
+# (only 2-5 GB of RAM are necessary, depending on the number of cpus used).
+#
+# It splits the input hindcast data and the input observed data into smaller arrays based on the longitude values,
+# and then applies veriApply() to each sub-array, assembling the results in an array with the same format as the array returned by veriApply().
+# A progress bar shows how many sub-arrays have already been processed.
+#
+# To take advantage of this function, the option parallel=TRUE is enabled by default,
+# and the option 'ncpus' is set to 8, to work with BSC workstations with 4 physical cores (8 with hyperthreading).
+# Users who own IC3 Intel Xeon workstations with 8 physical cores should set ncpus = 16 to take advantage of hyperthreading.
+# With ncpus=8, calculations are 4 times faster, while with ncpus=16, calculations are 8 times faster.
+#
+# On Moore, you can set ncpus=8, and on Amdahl, ncpus can be set to a maximum of 12; however, it is not recommended to run this function at full power on Moore or Amdahl,
+# since it would consume all the cores available (8 for Moore and 12 for Amdahl), preventing other users from employing the cluster for their single-core calculations.
+# You can run it during the weekend if there are no other jobs scheduled, or set a lower number of ncpus (4-5) to leave resources for other users, even if performance will suffer.
+#
+# Parallel computation employs all CPU resources of the machine: it will go slower until the computation is finished.
+# If you want to use the machine also for other tasks, you can set a lower number of ncpus (2 or 3), with a loss of performance.
+#
+# If you get a memory problem error, try decreasing the size of the variable 'max.n.el'.
+#
+# Example of use:
+#
+# library(ff)
+# bigfile <- "/scratch/Earth/ncortesi/bigfile"  # choose a site where to store the big data array
+# source('/scratch/Earth/ncortesi/RESILIENCE/veriApplyBig.R')  # load the veriApplyBig() and save.big() functions
+#
+# # create a random hindcast that normally wouldn't fit into memory:
+# anom.hind.dim<-c(51,30,1,256,512)
+# anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)
+#
+# save.big(array=anom.hind, path=bigfile)
+#
+# # create a random observed hindcast that fits into memory (because observations only have 1 member):
+# anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])
+#
+# ffload(file=bigfile)
+# # str(ff.array)
+# open(ff.array)
+#
+# my.score <- veriApplyBig("FairRpss",fcst=ff.array, obs=anom.rean, tdim=2, ensdim=1 , prob=c(1/3, 1/3))  # by default parallel=TRUE and ncpus=8, to run it on our pc
+#
+# close(ff.array)
+veriApplyBig <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) - 1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+                          na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=5000000, path=NULL, ...)
+{
+  sub <- prod(dim(fcst))/max.n.el
+  sub <- ceiling(sub)             # number of sub-arrays in which to split the hindcast and reanalysis data
+  n.lon <- tail(dim(fcst),1)      # number of longitude elements
+  sub.size <- floor(n.lon/sub)    # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub     # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(fcst),3))  # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0}  # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last)  # longitude interval where to load data
+
+    #anom.hindcast.sub <- array(NA, c(head(dim(fcst),4), sub.size-last))
+    #anom.rean.sub <- array(NA, c(dim(fcst)[2:4], sub.size-last))
+
+    anom.hindcast.sub <- fcst[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  cat('\n')
+  return(my.SkillScore)
+}
+
+
+old_veriApplyBig <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) - 1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+                              na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=10000000, path=NULL, ...)
+{
+  ffload(file=fcst)
+  #str(ff.array)
+  open(ff.array)
+
+  sub <- prod(dim(ff.array))/max.n.el
+  sub <- ceiling(sub)               # number of sub-arrays in which to split the hindcast and reanalysis data
+  n.lon <- tail(dim(ff.array),1)    # number of longitude elements
+  sub.size <- floor(n.lon/sub)      # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub       # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(ff.array),3))  # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0}  # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last)  # longitude interval where to load data
+
+    #anom.hindcast.sub <- array(NA, c(head(dim(ff.array),4), sub.size-last))
+    #anom.rean.sub <- array(NA, c(dim(ff.array)[2:4], sub.size-last))
+
+    anom.hindcast.sub <- ff.array[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  close(ff.array)
+
+  cat('\n')
+  return(my.SkillScore)
+}
+################################################################################
+#                                save.big                                      #
+################################################################################
+
+
+# A wrapper of ffsave to save on disk big arrays of a numeric (double) variable
+# in .ffData format (by means of the ff package). See veriApplyBig() for an example.
+
+save.big <- function(array, path) {
+  ff.array <- as.ff(array, vmode="double", file = path)
+  ffsave(ff.array, file = path)
+  close(ff.array); rm(ff.array)
+}
+
+
+################################################################################
+#                              veriApplyPar                                    #
+################################################################################
+
+
+# A wrapper of veriApply() to efficiently use the option 'parallel=TRUE' also for large data arrays, with no memory limits,
+# but the input hindcast array must fit into memory to use this function.
+#
+# The function splits the input hindcast data and the input observed data into smaller arrays based on the longitude values,
+# and then applies veriApply() to each sub-array, assembling the results in an array with the same format as the array returned by veriApply().
+# A progress bar shows how many sub-arrays have already been processed.
+#
+# To take advantage of this function, the option parallel=TRUE is enabled by default,
+# and the option 'ncpus' is set to 8, to work with BSC workstations with 4 physical cores (8 with hyperthreading).
+# Users who own IC3 Intel Xeon workstations with 8 physical cores should set ncpus = 16 to take advantage of hyperthreading.
+# With ncpus=8, calculations are 4 times faster, while with ncpus=16, calculations are 8 times faster.
+#
+# On Moore, you can set ncpus=8, and on Amdahl, ncpus can be set to a maximum of 12; however, it is not recommended to run this function at full power on Moore or Amdahl,
+# since it would consume all the cores available (8 for Moore and 12 for Amdahl), preventing other users from employing the cluster for their single-core calculations.
+# You can run it during the weekend if there are no other jobs scheduled, or set a lower number of ncpus (4-5) to leave resources for other users, even if performance will suffer.
+#
+# Parallel computation employs all CPU resources of the machine: it is not possible to use it for other tasks until the computation is finished.
+# If you want to use the machine also for other tasks, you can set a lower number of ncpus (2 or 3), with a loss of performance.
+#
+# If you get a memory problem error, try decreasing the size of the variable 'max.n.el'.
+#
+# Example of use:
+#
+# my.score <- veriApplyPar("FairRpss",fcst=anom.hindcast, obs=anom.rean, tdim=2, ensdim=1 , prob=c(1/3, 1/3))  # by default parallel=TRUE and ncpus=8, to run it on our pc
+#
+# my.score <- veriApplyPar("FairCrpss",fcst=anom.hindcast, obs=anom.rean, tdim=2, ensdim=1 , ncpus=2)  # to set a lower number of cpus
+#
+veriApplyPar <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) - 1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+                          na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=10000000, ...)
+{
+  sub <- prod(dim(fcst))/max.n.el
+  sub <- ceiling(sub)             # number of sub-arrays in which to split the hindcast and reanalysis data
+  n.lon <- tail(dim(fcst),1)      # number of longitude elements
+  sub.size <- floor(n.lon/sub)    # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub     # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(fcst),3))  # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0}  # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last)  # longitude interval where to load data
+
+    #anom.hindcast.sub <- array(NA, c(head(dim(fcst),4), sub.size-last))
+    #anom.rean.sub <- array(NA, c(dim(fcst)[2:4], sub.size-last))
+
+    anom.hindcast.sub <- fcst[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  cat('\n')
+  return(my.SkillScore)
+}
+
+
+#######################################################################################
+#                                 parApplyCal                                         #
+#######################################################################################
+
+
+# Like parApply, but with a check to authorize the parallel computation or not.
+# As with parApply, it must be applied only to arrays small enough to fit in memory.
+# If your array is too big, consider splitting it into smaller arrays with
+# the function split.array.
+
+parApplyCal <- function(cl = NULL, X, MARGIN, FUN, ncpus=4, ...) {
+
+  .cl <- try(parallel::makeCluster(ncpus, type = "FORK"), silent = TRUE)
+
+  hasparallel <- !("try-error" %in% class(.cl))  # fall back to apply() if the cluster cannot be created
+
+  if (hasparallel) {
+    on.exit(parallel::stopCluster(.cl))
+    output <- parallel::parApply(cl = .cl, X = X, MARGIN = MARGIN, FUN = FUN, ...)
+  } else {
+    output <- apply(X = X, MARGIN = MARGIN, FUN = FUN, ...)
+  }
+
+  return(output)
+}
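+
+# A minimal usage sketch of parApplyCal() (random data, illustration only):
+#
+# X <- array(rnorm(10*50*60), c(10, 50, 60))
+# out <- parApplyCal(X = X, MARGIN = c(2,3), FUN = mean, ncpus = 4)  # falls back to apply() if no cluster can be created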
+#######################################################################################
+#                               old_parApplyBig                                       #
+#######################################################################################
+
+
+# parallel::parApply() needs too much memory when the input array is too big (>400-500 MB on 8 GB machines).
+
+# splitdim is the dimension that will be split internally; it must be one of the dimensions also used by MARGIN (see the examples below)
+
+old_parApplyBig <- function(cl = NULL, X, MARGIN, FUN, splitdim = tail(dim(X),1), max.n.el=10000000, ncpus=4, ...) {
+
+  sub <- prod(dim(X))/max.n.el
+  sub <- ceiling(sub)             # number of sub-arrays in which to split the input data
+  n.el <- dim(X)[splitdim]        # number of elements in the splitdim dimension
+  sub.size <- floor(n.el/sub)     # number of elements in the splitting dimension of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.el %% sub      # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+  n.dim.X <- length(dim(X))       # number of dimensions of array X
+
+  # swap the splitdim dimension with the last one:
+  my.seq <- 1:n.dim.X
+  my.seq[n.dim.X] <- splitdim
+  my.seq[splitdim] <- n.dim.X
+  if(splitdim < n.dim.X) X <- aperm(X, my.seq)
+  if(splitdim < n.dim.X && length(which(MARGIN==n.dim.X))==0) MARGIN[splitdim] <- n.dim.X
+
+  output<-array(NA,dim(X)[MARGIN])  # take only the dimensions used by MARGIN
+
+  print('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0}  # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last)  # interval of the splitting dimension where to load data
+
+    if(n.dim.X == 2) subarray <- X[,my.interv]
+    if(n.dim.X == 3) subarray <- X[,,my.interv]
+    if(n.dim.X == 4) subarray <- X[,,,my.interv]
+    if(n.dim.X == 5) subarray <- X[,,,,my.interv]
+    if(n.dim.X == 6) subarray <- X[,,,,,my.interv]
+    if(n.dim.X == 7) subarray <- X[,,,,,,my.interv]
+    if(n.dim.X == 8) subarray <- X[,,,,,,,my.interv]
+    if(n.dim.X == 9) subarray <- X[,,,,,,,,my.interv]
+    if(n.dim.X == 10) subarray <- X[,,,,,,,,,my.interv]
+    if(n.dim.X > 10) stop("input array has too many dimensions")
+
+    .cl <- try(parallel::makeCluster(ncpus, type = "FORK"), silent = TRUE)
+
+    hasparallel <- !("try-error" %in% class(.cl))  # fall back to apply() if the cluster cannot be created
+
+    if (hasparallel) {
+      on.exit(parallel::stopCluster(.cl))
+      suboutput <- parallel::parApply(cl = .cl, X = subarray, MARGIN = MARGIN, FUN = FUN, ...)
+    }
+    else {
+      suboutput <- apply(X = subarray, MARGIN = MARGIN, FUN = FUN, ...)
+    }
+
+    if(length(MARGIN) == 2) output[,my.interv] <- suboutput
+    if(length(MARGIN) == 3) output[,,my.interv] <- suboutput
+    if(length(MARGIN) == 4) output[,,,my.interv] <- suboutput
+    if(length(MARGIN) == 5) output[,,,,my.interv] <- suboutput
+    if(length(MARGIN) == 6) output[,,,,,my.interv] <- suboutput
+
+    if(splitdim < n.dim.X) X <- aperm(X, my.seq)  ## to fix!!!
+
+    gc()
+  }
+
+  cat('\n')
+  return(output)
+}
+
+
+#################################################################################################
+#                                       Wine indexes                                            #
+#################################################################################################
+
+# 1. Annual mean temperature
+# temp must have the format [month,lat,lon] or [month,lon,lat], and num.months must be a multiple of 12;
+# num.months can be smaller than the number of months in temp: it defines the number of months used to calculate the index, starting from the first month of the temp array.
+# It must be a multiple of 12 to reflect whole years of data; e.g. num.months=24 uses the first 24 months of the temp array for the average.
+index1<-function(temp,num.months){
+  my.temp<-temp[1:num.months,,]  # select only the months actually used
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1))  # if the subsetting dropped the dimensions, convert it back to an array before calling apply
+  my.index<-apply(my.temp,c(2,3),mean)  # calculate the mean temperature
+  return(my.index)
+}
+
+#2. Mean temperature for the growing season, for the WHOLE period
+# (average of the monthly mean temperatures over the October to April period).
+# You must provide at least 24 months of data starting from January to compute the index,
+# because the first 4 months and the last 3 cannot be used for the calculation.
+# If you want to calculate the yearly value for year XXXX, just introduce its 12 months plus the 12 months of the following year.
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index2<-function(temp,num.months){
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2  # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)]  # remove the first four months because they miss the Oct:Dec months
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)]  # remove the last three months because they miss the Jan:Apr months
+
+  temp.oct.apr<-temp[my.months,,]  # select only the months from October to April
+  if(!is.array(temp.oct.apr)) temp.oct.apr<-array(temp.oct.apr,c(length(temp.oct.apr),1,1))  # if the subsetting dropped the dimensions, convert it back to an array
+
+  my.index<-apply(temp.oct.apr,c(2,3),mean)  # calculate the mean temperature
+  return(my.index)
+}
+
+#3. Winkler index for the vegetative period
+# temp data must start from January; num.months must be at least 24 months long and a multiple of 12 months.
+# temp must have the format [month,lat,lon] or [month,lon,lat]
+index3<-function(temp,num.months){
+  n.years=num.months/12
+
+  temp.minus.ten<-temp-10                  # subtract 10 degrees from each month
+  ss<-which(temp.minus.ten<0,arr.ind=T)    # select elements < 0 degrees
+  temp.minus.ten[ss]=0                     # set to 0 the elements lower than 0 degrees
+
+  month31=c(1,3,5,7,8,10,12)  # months with 31 days
+  my.months31<-rep(month31,n.years)+rep(0:(n.years-1)*12,each=length(month31))  # select only the months with 31 days
+  temp.minus.ten[my.months31,,]<-temp.minus.ten[my.months31,,]*31  # multiply the temperature of these months by 31
+
+  month30=c(4,6,9,11)  # months with 30 days
+  my.months30<-rep(month30,n.years)+rep(0:(n.years-1)*12,each=length(month30))  # select only the months with 30 days
+  temp.minus.ten[my.months30,,]<-temp.minus.ten[my.months30,,]*30  # multiply the temperature of these months by 30
+
+  month28=2  # month with 28 days
+  my.months28<-rep(month28,n.years)+rep(0:(n.years-1)*12,each=length(month28))  # select only the months with 28 days
+  temp.minus.ten[my.months28,,]<-temp.minus.ten[my.months28,,]*28.25  # multiply the temperature of these months by 28.25
+
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2  # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)]  # remove the first four months because they miss the Oct:Dec months
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)]  # remove the last three months because they miss the Jan:Apr months
+
+  temp.oct.apr<-temp.minus.ten[my.months,,]  # select only the months from October to April
+  if(!is.array(temp.oct.apr)) temp.oct.apr<-array(temp.oct.apr,c(length(temp.oct.apr),1,1))  # if the subsetting dropped the dimensions, convert it back to an array before calling apply
+  my.index<-apply(temp.oct.apr,c(2,3),sum)  # calculate the sum of the (approximately) daily degrees above 10 degrees
+  return(my.index/(n.years-1))  # normalize by the number of years used, taking into account that one vegetative period is always lost
+}
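+
+# A minimal usage sketch of the wine indices (random monthly data, illustration only):
+#
+# n.months <- 24  # two years of monthly data, starting in January
+# temp <- array(rnorm(n.months*10*10, mean = 15), c(n.months, 10, 10))
+# prec <- array(rgamma(n.months*10*10, 2, 0.05), c(n.months, 10, 10))
+# index1(temp, n.months)            # annual mean temperature on the 10x10 grid
+# indices(temp, prec, n.months)     # list with all six indices (see below)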
+# 3. Winkler index for the vegetative period
+# temp data must start from January; num.months must be at least 24 and a multiple of 12.
+# temp must have the format [month,lat,lon] or [month,lon,lat].
+index3<-function(temp,num.months){
+  n.years=num.months/12
+
+  temp.minus.ten<-temp-10 # subtract 10 degrees from each monthly mean
+  ss<-which(temp.minus.ten<0,arr.ind=T) # find the elements below 0 degrees
+  temp.minus.ten[ss]=0 # and set them to 0
+
+  month31=c(1,3,5,7,8,10,12) # months with 31 days
+  my.months31<-rep(month31,n.years)+rep(0:(n.years-1)*12,each=length(month31)) # positions of the 31-day months
+  temp.minus.ten[my.months31,,]<-temp.minus.ten[my.months31,,]*31 # multiply the temperature of these months by 31
+
+  month30=c(4,6,9,11) # months with 30 days
+  my.months30<-rep(month30,n.years)+rep(0:(n.years-1)*12,each=length(month30)) # positions of the 30-day months
+  temp.minus.ten[my.months30,,]<-temp.minus.ten[my.months30,,]*30 # multiply the temperature of these months by 30
+
+  month28=2 # February
+  my.months28<-rep(month28,n.years)+rep(0:(n.years-1)*12,each=length(month28)) # positions of February in each year
+  temp.minus.ten[my.months28,,]<-temp.minus.ten[my.months28,,]*28.25 # multiply by 28.25, the mean February length over the leap-year cycle
+
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the October-April months inside num.months
+
+  my.months<-my.months[-(1:4)] # drop the first four months (their Oct-Dec part is missing)
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # drop the last three months (their Jan-Apr part is missing)
+
+  temp.oct.apr<-temp.minus.ten[my.months,,] # keep only the months from October to April
+  if(!is.array(temp.oct.apr)) temp.oct.apr<-array(temp.oct.apr,c(length(temp.oct.apr),1,1)) # restore the array dimensions if the subsetting dropped them
+  my.index<-apply(temp.oct.apr,c(2,3),sum) # sum of the (approximately daily) degrees above 10 degrees
+  return(my.index/(n.years-1)) # normalize by the number of complete vegetative periods (one is always lost)
+}
+
+# 4. Winter Severity Index (old version: absolute minimum of the monthly mean temperature)
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12.
+index4_old<-function(temp,num.months){
+  my.temp<-temp[1:num.months,,] # select only the months actually used
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # restore the array dimensions if the subsetting dropped them
+  my.index<-apply(my.temp,c(2,3),min)
+  return(my.index)
+}
+
+# 4. Winter Severity Index (mean temperature of the coldest month)
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12.
+index4<-function(temp,num.months){
+  n.years=num.months/12
+
+  month.coldest=7 # coldest month in the Southern Hemisphere (July)
+  my.months.coldest<-rep(month.coldest,n.years)+rep(0:(n.years-1)*12,each=1) # position of the coldest month in each year
+  my.temp<-temp[1:num.months,,] # select only the months in the chosen period
+
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # restore the array dimensions if the subsetting dropped them
+  my.temp<-my.temp[my.months.coldest,,] # keep only the coldest months
+  if(is.null(dim(my.temp))) {my.index<-my.temp} else {my.index<-apply(my.temp,c(2,3),mean)}
+  return(my.index)
+}
+
+# 5. Annual precipitation
+# prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12.
+index5<-function(prec,num.months){
+  my.prec<-prec[1:num.months,,] # select only the months actually used
+  if(!is.array(my.prec)) my.prec<-array(my.prec,c(length(my.prec),1,1)) # restore the array dimensions if the subsetting dropped them
+  my.index<-apply(my.prec,c(2,3),sum)
+  n.years=num.months/12
+  return(my.index/n.years) # normalize by the number of years to return the mean annual value of the index
+}
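+
+# A minimal worked sketch of the Winkler arithmetic (hypothetical values): a
+# 31-day month averaging 20 C contributes (20-10)*31 = 310 degree-days, while
+# months averaging 10 C or less contribute nothing:
+#
+# temp <- array(18 + 6*sin(2*pi*(1:24)/12), c(24, 3, 4))  # [month,lat,lon]
+# winkler <- index3(temp, num.months = 24)  # degree-days per grid point, one full Oct-Apr season
+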
+# 6. Precipitation during the vegetative cycle (October to April)
+# prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12 and at least 24.
+index6<-function(prec,num.months){
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the October-April months inside num.months
+
+  my.months<-my.months[-(1:4)] # drop the first four months (they belong to the previous vegetative cycle)
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # drop the last three months of the last year
+
+  prec.oct.apr<-prec[my.months,,] # keep only the months from October to April
+  if(!is.array(prec.oct.apr)) prec.oct.apr<-array(prec.oct.apr,c(length(prec.oct.apr),1,1)) # restore the array dimensions if the subsetting dropped them
+  my.index<-apply(prec.oct.apr,c(2,3),sum) # total precipitation
+  n.years=num.months/12
+
+  return(my.index/(n.years-1)) # normalize by the number of vegetative periods provided, to return the mean annual value of the index
+}
+
+# function to select one of the above six indices:
+# temp and prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+choose.index<-function(num.index,temp,prec,num.months){
+  if(num.index==1) return(index1(temp,num.months))
+  if(num.index==2) return(index2(temp,num.months))
+  if(num.index==3) return(index3(temp,num.months))
+  if(num.index==4) return(index4(temp,num.months))
+  if(num.index==5) return(index5(prec,num.months))
+  if(num.index==6) return(index6(prec,num.months))
+}
+
+# same as above, but returns all six indices at once:
+indices<-function(temp,prec,num.months){
+  return(list(TempMediaAnual=index1(temp,num.months),
+              TempMediaVeget=index2(temp,num.months),
+              IndiceWinklerVeg=index3(temp,num.months),
+              WinterSeverityIndex=index4(temp,num.months),
+              PrecAnual=index5(prec,num.months),
+              PrecVeget=index6(prec,num.months)))
+}
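+
+# A minimal usage sketch (synthetic, hypothetical data): all six indices for
+# two years of monthly data on a 3x4 grid:
+#
+# temp <- array(15 + 10*sin(2*pi*(1:24)/12), c(24, 3, 4))
+# prec <- array(runif(24*3*4, 0, 100), c(24, 3, 4))
+# all.idx <- indices(temp, prec, num.months = 24)
+# str(all.idx)  # list of six [lat,lon] matrices
+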
+#################################################################################################
+#                                       Error indexes                                           #
+#################################################################################################
+
+RMSE<-function(obs,pred){ # simple function to compute the Root Mean Square Error, given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+  # pred always holds all the predicted yearly values, whereas some years may lack observations, so only the rows corresponding to years with NA among the obs values must be removed:
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  RMSE<-(sum.scarti.quad/(length(years.right)))^0.5 # the years with NA must be excluded from the denominator too!
+  return(RMSE)
+}
+
+MAE<-function(obs,pred){ # simple function to compute the Mean Absolute Error, given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+  # pred always holds all the predicted yearly values, whereas some years may lack observations, so only the rows corresponding to years with NA among the obs values must be removed:
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  MAE<-sum(abs(obs2-pred2))/(length(years.right)) # the years with NA must be excluded from the denominator too!
+  return(MAE)
+}
+
+MAEp<-function(obs,pred){ # simple function to compute the Mean Absolute Error as a percentage (%), given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+  # pred always holds all the predicted yearly values, whereas some years may lack observations, so only the rows corresponding to years with NA among the obs values must be removed:
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  MAE<-sum(abs(obs2-pred2))/(length(years.right)) # the years with NA must be excluded from the denominator too!
+  obs.prom<-sum(obs2)/length(years.right)
+  MAE<-MAE/obs.prom
+  return(MAE)
+}
+
+MBE<-function(obs,pred){ # simple function to compute the Mean Bias Error (the obs vector may contain NA elements)
+  # pred always holds all the predicted yearly values, whereas some years may lack observations, so the rows corresponding to years with NA among the obs values must be removed:
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  MBE<-sum(pred2-obs2)/(length(years.right))
+  return(MBE)
+}
+
+AGREE<-function(obs,pred){ # computes Willmott's d, or Index of Agreement. BEWARE: it is almost as insensitive to over/underestimation as the R2
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  obs2.mean<-mean(obs2)
+  d<-1-(sum.scarti.quad/(sum((abs(pred2-obs2.mean)+abs(obs2-obs2.mean))^2)))
+  return(d)
+}
+
+AGREE.1<-function(obs,pred){ # Willmott's corrected d, without the squares
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+  obs2.mean<-mean(obs2)
+  d1<-1-(sum.scarti/(sum(abs(pred2-obs2.mean)+abs(obs2-obs2.mean))))
+  return(d1)
+}
+
+AGREE.2011<-function(obs,pred){ # the new Willmott's d, introduced by him in 2011
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+  obs2.mean<-mean(obs2)
+  denom<-2*sum(abs(obs2-obs2.mean)) # denominator of the d(r) formula
+  if(sum.scarti<=denom){d.2011<-1-(sum.scarti/denom)}else{d.2011<-(denom/sum.scarti)-1}
+  return(d.2011)
+}
+
+RMSE.freedom<-function(obs,pred,degree.freedom){ # computes the root mean square error with a user-supplied number of degrees of freedom, given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+  # pred always holds all the predicted yearly values, whereas some years may lack observations, so the rows corresponding to years with NA among the obs values must be removed:
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  RMSE<-(sum.scarti.quad/degree.freedom)^0.5
+  return(RMSE)
+}
+
+SumSquared<-function(obs,pred){ # simple function to compute the sum of squared differences, given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+  # pred always holds all the predicted yearly values, whereas some years may lack observations, so the rows corresponding to years with NA among the obs values must be removed:
+  years.right<-which(!is.na(obs))
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+  return(sum.scarti.quad)
+}
+
+
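+# A minimal usage sketch (synthetic, hypothetical data): scoring predictions
+# against observations that contain a missing year.
+#
+# obs  <- c(10.2, 11.5, NA, 9.8, 10.9)
+# pred <- c(10.0, 11.9, 10.4, 9.5, 11.2)
+# RMSE(obs, pred); MAE(obs, pred); MBE(obs, pred)  # the NA year is skipped in all three
+# AGREE(obs, pred)  # Willmott's Index of Agreement, in [0, 1]
+
+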
+################################################################################################# +# Others # +################################################################################################# + + + +myboxplot.stats <- function (x, coef = NULL, do.conf = TRUE, do.out =TRUE) +{ + nna <- !is.na(x) + n <- sum(nna) + stats <- quantile(x, c(.05,.25,.5,.75,.95), na.rm = TRUE) + iqr <- diff(stats[c(2, 4)]) + out <- x < stats[1] | x > stats[5] + conf <- if (do.conf) stats[3] + c(-1.58, 1.58) * diff(stats[c(2, 4)])/sqrt(n) + list(stats = stats, n = n, conf = conf, out = x[out & nna]) +} + +Load2 <- function (var, exp = NULL, obs = NULL, sdates, nmember = NULL, + nmemberobs = NULL, nleadtime = NULL, leadtimemin = 1, leadtimemax = NULL, + storefreq = "monthly", sampleperiod = 1, lonmin = 0, lonmax = 360, + latmin = -90, latmax = 90, output = "areave", method = "conservative", + grid = NULL, maskmod = vector("list", 15), maskobs = vector("list", + 15), configfile = NULL, varmin = NULL, varmax = NULL, + silent = FALSE, nprocs = NULL, dimnames = NULL, remapcells = 2) +{ + parameter_names <- ls() + if (length(parameter_names) < 3 || is.null(var) || is.null(sdates) || + (is.null(exp) && is.null(obs))) { + stop("Error: At least 'var', 'exp'/'obs' and 'sdates' must be provided.") + } + load_parameters <- lapply(parameter_names, get, envir = environment()) + names(load_parameters) <- parameter_names + parameters_to_show <- c("var", "exp", "obs", "sdates", "grid", + "output", "storefreq") + load_parameters <- c(load_parameters[parameters_to_show], + load_parameters[-match(parameters_to_show, names(load_parameters))]) + cat(paste("* The load call you issued is:\n* Load(", paste(strwrap(paste(unlist(lapply(names(load_parameters[1:length(parameters_to_show)]), + function(x) paste(x, "=", if (x == "sdates" && length(load_parameters[[x]]) > + 4) { + paste0("c('", load_parameters[[x]][1], "', '", load_parameters[[x]][2], + "', ..., '", tail(load_parameters[[x]], 1), "')") + } + else { + paste(deparse(load_parameters[[x]]), collapse = "") + }))), collapse = ", "), width = getOption("width") - + 9, indent = 0, exdent = 8), collapse = "\n*"), ", ...)\n* See the full call in '$load_parameters' after Load() finishes.\n", + sep = "")) + errors <- try({ + if (is.null(var) || !(is.character(var) && nchar(var) > + 0)) { + stop("Error: parameter 'var' should be a character string of length >= 1.") + } + exps_to_fetch <- c() + exp_info_names <- c("name", "path", "nc_var_name", "suffix", + "var_min", "var_max", "dimnames") + if (!is.null(exp) && !(is.character(exp) && all(nchar(exp) > + 0)) && !is.list(exp)) { + stop("Error: parameter 'exp' should be a vector of strings or a list with information of the experimental datasets to load. Check 'exp' in ?Load for details.") + } + else if (!is.null(exp)) { + if (!is.list(exp)) { + exp <- lapply(exp, function(x) list(name = x)) + } + for (i in 1:length(exp)) { + if (!is.list(exp[[i]])) { + stop("Error: parameter 'exp' is incorrect. It should be a list of lists.") + } + if (!(all(names(exp[[i]]) %in% exp_info_names))) { + stop("Error: parameter 'exp' is incorrect. There are unrecognized components in the information of some of the experiments. Check 'exp' in ?Load for details.") + } + if (!("name" %in% names(exp[[i]]))) { + exp[[i]][["name"]] <- paste0("exp", i) + if (!("path" %in% names(exp[[i]]))) { + stop("Error: parameter 'exp' is incorrect. A 'path' should be provided for each experimental dataset if no 'name' is provided. 
See 'exp' in ?Load for details.") + } + } + else if (!("path" %in% names(exp[[i]]))) { + exps_to_fetch <- c(exps_to_fetch, i) + } + if ("path" %in% names(exp[[i]])) { + if (!("nc_var_name" %in% names(exp[[i]]))) { + exp[[i]][["nc_var_name"]] <- "$VAR_NAME$" + } + if (!("suffix" %in% names(exp[[i]]))) { + exp[[i]][["suffix"]] <- "" + } + if (!("var_min" %in% names(exp[[i]]))) { + exp[[i]][["var_min"]] <- "" + } + if (!("var_max" %in% names(exp[[i]]))) { + exp[[i]][["var_max"]] <- "" + } + } + } + if ((length(exps_to_fetch) > 0) && (length(exps_to_fetch) < + length(exp))) { + cat("! Warning: 'path' was provided for some experimental datasets in 'exp'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + obs_to_fetch <- c() + obs_info_names <- c("name", "path", "nc_var_name", "suffix", + "var_min", "var_max") + if (!is.null(obs) && !(is.character(obs) && all(nchar(obs) > + 0)) && !is.list(obs)) { + stop("Error: parameter 'obs' should be a vector of strings or a list with information of the observational datasets to load. Check 'obs' in ?Load for details.") + } + else if (!is.null(obs)) { + if (!is.list(obs)) { + obs <- lapply(obs, function(x) list(name = x)) + } + for (i in 1:length(obs)) { + if (!is.list(obs[[i]])) { + stop("Error: parameter 'obs' is incorrect. It should be a list of lists.") + } + if (!(all(names(obs[[i]]) %in% obs_info_names))) { + stop("Error: parameter 'obs' is incorrect. There are unrecognized components in the information of some of the observations. Check 'obs' in ?Load for details.") + } + if (!("name" %in% names(obs[[i]]))) { + obs[[i]][["name"]] <- paste0("obs", i) + if (!("path" %in% names(obs[[i]]))) { + stop("Error: parameter 'obs' is incorrect. A 'path' should be provided for each observational dataset if no 'name' is provided. See 'obs' in ?Load for details.") + } + } + else if (!("path" %in% names(obs[[i]]))) { + obs_to_fetch <- c(obs_to_fetch, i) + } + if ("path" %in% names(obs[[i]])) { + if (!("nc_var_name" %in% names(obs[[i]]))) { + obs[[i]][["nc_var_name"]] <- "$VAR_NAME$" + } + if (!("suffix" %in% names(obs[[i]]))) { + obs[[i]][["suffix"]] <- "" + } + if (!("var_min" %in% names(obs[[i]]))) { + obs[[i]][["var_min"]] <- "" + } + if (!("var_max" %in% names(obs[[i]]))) { + obs[[i]][["var_max"]] <- "" + } + } + } + if (length(c(obs_to_fetch, exps_to_fetch) > 1) && + (length(obs_to_fetch) < length(obs))) { + cat("! Warning: 'path' was provided for some observational datasets in 'obs'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + if (is.null(sdates)) { + stop("Error: parameter 'sdates' must be provided.") + } + if (!is.character(sdates) || !all(nchar(sdates) == 8) || + any(is.na(strtoi(sdates)))) { + stop("Error: parameter 'sdates' is incorrect. All starting dates should be a character string in the format 'YYYYMMDD'.") + } + if (!is.null(nmember) && !is.null(exp)) { + if (!is.numeric(nmember)) { + stop("Error: parameter 'nmember' is incorrect. It should be numeric.") + } + if (length(nmember) == 1) { + cat(paste("! Warning: 'nmember' should specify the number of members of each experimental dataset. 
Forcing to", + nmember, "for all experiments.\n")) + nmember <- rep(nmember, length(exp)) + } + if (length(nmember) != length(exp)) { + stop("Error: 'nmember' must contain as many values as 'exp'.") + } + else if (any(is.na(nmember))) { + nmember[which(is.na(nmember))] <- max(nmember, + na.rm = TRUE) + } + } + if (!is.null(nmemberobs) && !is.null(obs)) { + if (!is.numeric(nmemberobs)) { + stop("Error: parameter 'nmemberobs' is incorrect. It should be numeric.") + } + if (length(nmemberobs) == 1) { + cat(paste("! Warning: 'nmemberobs' should specify the number of members of each observational dataset. Forcing to", + nmemberobs, "for all observations.\n")) + nmemberobs <- rep(nmemberobs, length(obs)) + } + if (length(nmemberobs) != length(obs)) { + stop("Error: 'nmemberobs' must contain as many values as 'obs'.") + } + else if (any(is.na(nmemberobs))) { + nmemberobs[which(is.na(nmemberobs))] <- max(nmemberobs, + na.rm = TRUE) + } + } + if (!is.null(nleadtime) && !is.numeric(nleadtime)) { + stop("Error: parameter 'nleadtime' is wrong. It should be numeric.") + } + if (is.null(leadtimemin) || !is.numeric(leadtimemin)) { + stop("Error: parameter 'leadtimemin' is wrong. It should be numeric.") + } + if (!is.null(leadtimemax) && !is.numeric(leadtimemax)) { + stop("Error: parameter 'leadtimemax' is wrong. It should be numeric.") + } + if (!is.character(storefreq) || !(storefreq %in% c("monthly", + "daily"))) { + stop("Error: parameter 'storefreq' is wrong, can take value 'daily' or 'monthly'.") + } + if (is.null(sampleperiod) || !is.numeric(sampleperiod)) { + stop("Error: parameter 'sampleperiod' is wrong. It should be numeric.") + } + if (is.null(lonmin) || !is.numeric(lonmin)) { + stop("Error: parameter 'lonmin' is wrong. It should be numeric.") + } + if (lonmin < -360 || lonmin > 360) { + stop("Error: parameter 'lonmin' must be in the range [-360, 360]") + } + if (lonmin < 0) { + lonmin <- lonmin + 360 + } + if (is.null(lonmax) || !is.numeric(lonmax)) { + stop("Error: parameter 'lonmax' is wrong. It should be numeric.") + } + if (lonmax < -360 || lonmax > 360) { + stop("Error: parameter 'lonmax' must be in the range [-360, 360]") + } + if (lonmax < 0) { + lonmax <- lonmax + 360 + } + if (is.null(latmin) || !is.numeric(latmin)) { + stop("Error: parameter 'latmin' is wrong. It should be numeric.") + } + if (latmin > 90 || latmin < -90) { + stop("Error: 'latmin' must be in the interval [-90, 90].") + } + if (is.null(latmax) || !is.numeric(latmax)) { + stop("Error: parameter 'latmax' is wrong. It should be numeric.") + } + if (latmax > 90 || latmax < -90) { + stop("Error: 'latmax' must be in the interval [-90, 90].") + } + if (is.null(output) || !(output %in% c("lonlat", "lon", + "lat", "areave"))) { + stop("Error: 'output' can only take values 'lonlat', 'lon', 'lat' or 'areave'.") + } + if (is.null(method) || !(method %in% c("bilinear", "bicubic", + "conservative", "distance-weighted"))) { + stop("Error: parameter 'method' is wrong, can take value 'bilinear', 'bicubic', 'conservative' or 'distance-weighted'.") + } + remap <- switch(method, bilinear = "remapbil", bicubic = "remapbic", + conservative = "remapcon", `distance-weighted` = "remapdis") + if (!is.null(grid)) { + if (is.character(grid)) { + supported_grids <- list("r[0-9]{1,}x[0-9]{1,}", + "t[0-9]{1,}grid") + grid_matches <- unlist(lapply(lapply(supported_grids, + regexpr, grid), .IsFullMatch, grid)) + if (sum(grid_matches) < 1) { + stop("The specified grid in the parameter 'grid' is incorrect. 
Must be one of rx or tgrid.") + } + } + else { + stop("Error: parameter 'grid' should be a character string, if specified.") + } + } + if (!is.list(maskmod)) { + stop("Error: parameter 'maskmod' must be a list.") + } + if (length(maskmod) < length(exp)) { + stop("Error: 'maskmod' must contain a numeric mask or NULL for each experiment in 'exp'.") + } + for (i in 1:length(maskmod)) { + if (is.list(maskmod[[i]])) { + if ((length(maskmod[[i]]) > 2) || !all(names(maskmod[[i]]) %in% + c("path", "nc_var_name"))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + else if (!(is.numeric(maskmod[[i]]) || is.null(maskmod[[i]]))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + if (!is.list(maskobs)) { + stop("Error: parameter 'maskobs' must be a list.") + } + if (length(maskobs) < length(obs)) { + stop("Error: 'maskobs' must contain a numeric mask or NULL for each obseriment in 'obs'.") + } + for (i in 1:length(maskobs)) { + if (is.list(maskobs[[i]])) { + if ((length(maskobs[[i]]) > 2) || !all(names(maskobs[[i]]) %in% + c("path", "nc_var_name"))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + else if (!(is.numeric(maskobs[[i]]) || is.null(maskobs[[i]]))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + if ((output != "areave" || !is.null(grid)) && length(exp) > + 0) { + if (!all(unlist(lapply(maskobs, is.null)))) { + cat("! Warning: 'maskobs' will be ignored. 'maskmod[[1]]' will be applied to observations instead.\n") + } + maskobs <- lapply(maskobs, function(x) x <- maskmod[[1]]) + } + if (is.null(configfile)) { + configfile <- system.file("config", "BSC.conf", package = "s2dverification") + } + else if (!is.character(configfile) || !(nchar(configfile) > + 0)) { + stop("Error: parameter 'configfile' must be a character string with the path to an s2dverification configuration file, if specified.") + } + if (!is.null(varmin) && !is.numeric(varmin)) { + stop("Error: parameter 'varmin' must be numeric, if specified.") + } + if (!is.null(varmax) && !is.numeric(varmax)) { + stop("Error: parameter 'varmax' must be numeric, if specified.") + } + if (!is.logical(silent)) { + stop("Error: parameter 'silent' must be TRUE or FALSE.") + } + if (!is.null(nprocs) && (!is.numeric(nprocs) || nprocs < + 1)) { + stop("Error: parameter 'nprocs' must be a positive integer, if specified.") + } + if (!is.null(dimnames) && (!is.list(dimnames))) { + stop("Error: parameter 'dimnames' must be a list, if specified.") + } + if (!all(names(dimnames) %in% c("member", "lat", "lon"))) { + stop("Error: parameter 'dimnames' is wrong. There are unrecognized component names. 
See 'dimnames' in ?Load for details.") + } + if (!is.numeric(remapcells) || remapcells < 0) { + stop("Error: 'remapcells' must be an integer >= 0.") + } + if (length(exps_to_fetch) > 0 || length(obs_to_fetch) > + 0) { + cat("* Some 'path's not explicitly provided in 'exp' and 'obs', so will now proceed to open the configuration file.\n") + data_info <- ConfigFileOpen(configfile, silent, TRUE) + matches <- ConfigApplyMatchingEntries(data_info, + var, sapply(exp[exps_to_fetch], "[[", "name"), + sapply(obs[obs_to_fetch], "[[", "name"), show_entries = FALSE, + show_result = FALSE) + replace_values <- data_info$definitions + if (!is.null(exp) && length(exps_to_fetch) > 0) { + counter <- 1 + exp[exps_to_fetch] <- lapply(matches$exp_info, + function(x) { + x[names(exp[[exps_to_fetch[counter]]])] <- exp[[exps_to_fetch[counter]]] + x[["path"]] <- paste0(x[["main_path"]], x[["file_path"]]) + counter <<- counter + 1 + x + }) + } + if (!is.null(obs) && length(obs_to_fetch) > 0) { + counter <- 1 + obs[obs_to_fetch] <- lapply(matches$obs_info, + function(x) { + x[names(obs[[obs_to_fetch[counter]]])] <- obs[[obs_to_fetch[counter]]] + x[["path"]] <- paste0(x[["main_path"]], x[["file_path"]]) + counter <<- counter + 1 + x + }) + } + if (!silent) { + cat("* All pairs (var, exp) and (var, obs) have matching entries.\n") + } + } + else { + replace_values <- list(DEFAULT_NC_VAR_NAME = "$VAR_NAME$", + DEFAULT_VAR_MIN = "", DEFAULT_VAR_MAX = "", DEFAULT_SUFFIX = "", + DEFAULT_DIM_NAME_LONGITUDES = "longitude", DEFAULT_DIM_NAME_LATITUDES = "latitude", + DEFAULT_DIM_NAME_MEMBERS = "ensemble") + } + dimnames <- list(lon = ifelse(is.null(dimnames[["lon"]]), + replace_values[["DEFAULT_DIM_NAME_LONGITUDES"]], + dimnames[["lon"]]), lat = ifelse(is.null(dimnames[["lat"]]), + replace_values[["DEFAULT_DIM_NAME_LATITUDES"]], dimnames[["lat"]]), + member = ifelse(is.null(dimnames[["member"]]), replace_values[["DEFAULT_DIM_NAME_MEMBERS"]], + dimnames[["member"]])) + if (!is.null(exp)) { + exp <- lapply(exp, function(x) { + if (!("dimnames" %in% names(x))) { + x[["dimnames"]] <- dimnames + x + } + else { + dimnames2 <- dimnames + dimnames2[names(x[["dimnames"]])] <- x[["dimnames"]] + x[["dimnames"]] <- dimnames2 + x + } + }) + } + if (!is.null(obs)) { + obs <- lapply(obs, function(x) { + if (!("dimnames" %in% names(x))) { + x[["dimnames"]] <- dimnames + x + } + else { + dimnames2 <- dimnames + dimnames2[names(x[["dimnames"]])] <- x[["dimnames"]] + x[["dimnames"]] <- dimnames2 + x + } + }) + } + single_dataset <- (length(obs) + length(exp) == 1) + replace_values[["VAR_NAME"]] <- var + replace_values[["STORE_FREQ"]] <- storefreq + latitudes <- longitudes <- NULL + leadtimes <- NULL + var_exp <- var_obs <- NULL + units <- var_long_name <- NULL + is_2d_var <- FALSE + nmod <- length(exp) + nobs <- length(obs) + nsdates <- length(sdates) + if (!silent) { + cat("* Fetching first experimental files to work out 'var_exp' size...\n") + } + dataset_type <- "exp" + dim_exp <- NULL + filename <- file_found <- tmp <- nltime <- NULL + dims2define <- TRUE + is_file_per_member_exp <- rep(nmod, FALSE) + exp_work_pieces <- list() + jmod <- 1 + while (jmod <= nmod) { + tags_to_find <- c("MEMBER_NUMBER") + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values[-position_of_tags], TRUE) + } + else { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values, TRUE) + 
} + is_file_per_member_exp[jmod] <- grepl("$MEMBER_NUMBER$", + quasi_final_path, fixed = TRUE) + replace_values[["EXP_NAME"]] <- exp[[jmod]][["name"]] + replace_values[["NC_VAR_NAME"]] <- exp[[jmod]][["nc_var_name"]] + namevar <- .ConfigReplaceVariablesInString(exp[[jmod]][["nc_var_name"]], + replace_values) + replace_values[["SUFFIX"]] <- exp[[jmod]][["suffix"]] + if (is.null(varmin)) { + mod_var_min <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][["var_min"]], + replace_values)) + } + else { + mod_var_min <- varmin + } + if (is.null(varmax)) { + mod_var_max <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][["var_max"]], + replace_values)) + } + else { + mod_var_max <- varmax + } + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + replace_values[["YEAR"]] <- substr(sdates[jsdate], + 1, 4) + replace_values[["MONTH"]] <- substr(sdates[jsdate], + 5, 6) + replace_values[["DAY"]] <- substr(sdates[jsdate], + 7, 8) + if (dims2define) { + if (is_file_per_member_exp[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- "*" + } + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, grid = grid, + remap = remap, remapcells = remapcells, is_file_per_member = is_file_per_member_exp[jmod], + is_file_per_dataset = FALSE, lon_limits = c(lonmin, + lonmax), lat_limits = c(latmin, latmax), + dimnames = exp[[jmod]][["dimnames"]], single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, + silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmember)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading data from a server but 'nmember' not specified. Loading only one member.\n") + nmember <- rep(1, nmod) + } + else { + nmember <- rep(found_dims[["member"]], + nmod) + } + } + if (is.null(nleadtime)) { + nleadtime <- found_dims[["time"]] + } + if (is.null(leadtimemax)) { + leadtimemax <- nleadtime + } + else if (leadtimemax > nleadtime) { + stop("Error: 'leadtimemax' argument is greater than the number of loaded leadtimes. 
Put first the experiment with the greatest number of leadtimes or adjust properly the parameters 'nleadtime' and 'leadtimemax'.") + } + leadtimes <- seq(leadtimemin, leadtimemax, + sampleperiod) + latitudes <- found_dims[["lat"]] + longitudes <- found_dims[["lon"]] + if (output == "lon" || output == "lonlat") { + dim_exp[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_exp[["lat"]] <- length(latitudes) + } + dim_exp[["time"]] <- length(leadtimes) + dim_exp[["member"]] <- max(nmember) + dim_exp[["sdate"]] <- nsdates + dim_exp[["dataset"]] <- nmod + dims2define <- FALSE + } + } + if (is_file_per_member_exp[jmod]) { + jmember <- 1 + while (jmember <= nmember[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", + (nmember[jmod]%/%10) + 1, "i", sep = ""), + jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, indices = c(1, + jmember, jsdate, jmod), nmember = nmember[jmod], + leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][["dimnames"]], + var_limits = c(mod_var_min, mod_var_max), + remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + jmember <- jmember + 1 + } + } + else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, indices = c(1, + 1, jsdate, jmod), nmember = nmember[jmod], + leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][["dimnames"]], + var_limits = c(mod_var_min, mod_var_max), + remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + } + jsdate <- jsdate + 1 + } + jmod <- jmod + 1 + } + if (dims2define && length(exp) > 0) { + cat("! Warning: no data found in file system for any experimental dataset.\n") + } + dims <- dim_exp[na.omit(match(c("dataset", "member", + "sdate", "time", "lat", "lon"), names(dim_exp)))] + if (is.null(dims[["member"]]) || any(is.na(unlist(dims))) || + any(unlist(dims) == 0)) { + dims <- 0 + dim_exp <- NULL + } + if (!silent) { + message <- "* Success. Detected dimensions of experimental data: " + cat(paste0(message, paste(unlist(dims), collapse = ", "), + "\n")) + cat("* Fetching first observational files to work out 'var_obs' size...\n") + } + if (is.null(exp) || dims == 0) { + if (is.null(leadtimemax)) { + cat("! Warning: loading observations only and no 'leadtimemax' specified. 
Data will be loaded from each starting date to current time.\n") + diff <- Sys.time() - as.POSIXct(paste(substr(sdates[1], + 1, 4), "-", substr(sdates[1], 5, 6), "-", substr(sdates[1], + 7, 8), sep = "")) + if (storefreq == "monthly") { + leadtimemax <- as.integer(diff/30) + } + else { + leadtimemax <- as.integer(diff) + } + } + if (is.null(nleadtime)) { + nleadtime <- leadtimemax + } + leadtimes <- seq(leadtimemin, leadtimemax, sampleperiod) + } + dataset_type <- "obs" + dim_obs <- NULL + dims2define <- TRUE + lat_indices <- lon_indices <- NULL + obs_work_pieces <- list() + is_file_per_dataset_obs <- rep(FALSE, nobs) + is_file_per_member_obs <- rep(FALSE, nobs) + jobs <- 1 + while (jobs <= nobs) { + tags_to_find <- c("MONTH", "DAY", "YEAR", "MEMBER_NUMBER") + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values[-position_of_tags], TRUE) + } + else { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values, TRUE) + } + is_file_per_dataset_obs[jobs] <- !any(sapply(c("$MONTH$", + "$DAY$", "$YEAR$"), grepl, quasi_final_path, + fixed = TRUE)) + is_file_per_member_obs[jobs] <- grepl("$MEMBER_NUMBER$", + quasi_final_path, fixed = TRUE) + replace_values[["OBS_NAME"]] <- obs[[jobs]][["name"]] + replace_values[["NC_VAR_NAME"]] <- obs[[jobs]][["nc_var_name"]] + namevar <- .ConfigReplaceVariablesInString(obs[[jobs]][["nc_var_name"]], + replace_values) + replace_values[["SUFFIX"]] <- obs[[jobs]][["suffix"]] + if (is.null(varmin)) { + obs_var_min <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][["var_min"]], + replace_values)) + } + else { + obs_var_min <- varmin + } + if (is.null(varmax)) { + obs_var_max <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][["var_max"]], + replace_values)) + } + else { + obs_var_max <- varmax + } + if (is_file_per_dataset_obs[jobs]) { + if (dims2define) { + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, grid = grid, + remap = remap, remapcells = remapcells, is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), lat_limits = c(latmin, + latmax), dimnames = obs[[jobs]][["dimnames"]], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, + silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. 
Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } + else { + nmemberobs <- rep(found_dims[["member"]], + nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[["lon"]] + latitudes <- found_dims[["lat"]] + } + if (output == "lon" || output == "lonlat") { + dim_obs[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_obs[["lat"]] <- length(latitudes) + } + dim_obs[["time"]] <- length(leadtimes) + dim_obs[["member"]] <- max(nmemberobs) + dim_obs[["sdate"]] <- nsdates + dim_obs[["dataset"]] <- nobs + dims2define <- FALSE + } + } + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, indices = c(1, + 1, 1, jobs), nmember = nmemberobs[jobs], mask = maskobs[[jobs]], + leadtimes = leadtimes, is_file_per_dataset = is_file_per_dataset_obs[jobs], + startdates = sdates, dimnames = obs[[jobs]][["dimnames"]], + var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } + else { + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + sdate <- sdates[jsdate] + if (storefreq == "daily") { + day <- substr(sdate, 7, 8) + if (day == "") { + day <- "01" + } + day <- as.integer(day) + startdate <- as.POSIXct(paste(substr(sdate, + 1, 4), "-", substr(sdate, 5, 6), "-", day, + " 12:00:00", sep = "")) + (leadtimemin - + 1) * 86400 + year <- as.integer(substr(startdate, 1, 4)) + month <- as.integer(substr(startdate, 6, + 7)) + } + else { + month <- (as.integer(substr(sdate, 5, 6)) + + leadtimemin - 2)%%12 + 1 + year <- as.integer(substr(sdate, 1, 4)) + + (as.integer(substr(sdate, 5, 6)) + leadtimemin - + 2)%/%12 + } + jleadtime <- 1 + while (jleadtime <= length(leadtimes)) { + replace_values[["YEAR"]] <- paste(year, "", + sep = "") + replace_values[["MONTH"]] <- sprintf("%2.2i", + month) + if (storefreq == "daily") { + replace_values[["DAY"]] <- sprintf("%2.2i", + day) + days_in_month <- ifelse(LeapYear(year), + 29, 28) + days_in_month <- switch(paste(month, "", + sep = ""), `1` = 31, `3` = 31, `4` = 30, + `5` = 31, `6` = 30, `7` = 31, `8` = 31, + `9` = 30, `10` = 31, `11` = 30, `12` = 31, + days_in_month) + obs_file_indices <- seq(day, min(days_in_month, + (length(leadtimes) - jleadtime) * sampleperiod + + day), sampleperiod) + } + else { + obs_file_indices <- 1 + } + if (dims2define) { + if (is_file_per_member_obs[jobs]) { + replace_values[["MEMBER_NUMBER"]] <- "*" + } + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, + grid = grid, remap = remap, remapcells = remapcells, + is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), lat_limits = c(latmin, + latmax), dimnames = obs[[jobs]][["dimnames"]], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, + explore_dims = TRUE, silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. 
Forcing to 'areave'\n.", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } + else { + nmemberobs <- rep(found_dims[["member"]], + nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[["lon"]] + latitudes <- found_dims[["lat"]] + } + if (output == "lon" || output == "lonlat") { + dim_obs[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_obs[["lat"]] <- length(latitudes) + } + dim_obs[["time"]] <- length(leadtimes) + dim_obs[["member"]] <- max(nmemberobs) + dim_obs[["sdate"]] <- nsdates + dim_obs[["dataset"]] <- nobs + dims2define <- FALSE + } + } + if (is_file_per_member_obs[jobs]) { + jmember <- 1 + while (jmember <= nmemberobs[jobs]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", + (nmemberobs[jobs]%/%10) + 1, "i", sep = ""), + jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, + indices = c(jleadtime, jmember, jsdate, + jobs), nmember = nmemberobs[jobs], + leadtimes = obs_file_indices, mask = maskobs[[jobs]], + dimnames = obs[[jobs]][["dimnames"]], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + var_limits = c(obs_var_min, obs_var_max), + remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, + list(work_piece)) + jmember <- jmember + 1 + } + } + else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, indices = c(jleadtime, + 1, jsdate, jobs), nmember = nmemberobs[jobs], + leadtimes = obs_file_indices, mask = maskobs[[jobs]], + dimnames = obs[[jobs]][["dimnames"]], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + var_limits = c(obs_var_min, obs_var_max), + remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } + if (storefreq == "daily") { + startdate <- startdate + 86400 * sampleperiod * + length(obs_file_indices) + year <- as.integer(substr(startdate, 1, + 4)) + month <- as.integer(substr(startdate, 6, + 7)) + day <- as.integer(substr(startdate, 9, + 10)) + } + else { + month <- month + sampleperiod + year <- year + (month - 1)%/%12 + month <- (month - 1)%%12 + 1 + } + jleadtime <- jleadtime + length(obs_file_indices) + } + jsdate <- jsdate + 1 + } + } + jobs <- jobs + 1 + } + if (dims2define && length(obs) > 0) { + cat("! Warning: no data found in file system for any observational dataset.\n") + } + dims <- dim_obs[na.omit(match(c("dataset", "member", + "sdate", "time", "lat", "lon"), names(dim_obs)))] + if (is.null(dims[["member"]]) || any(is.na(unlist(dims))) || + any(unlist(dims) == 0)) { + dims <- 0 + dim_obs <- NULL + } + if (!silent) { + message <- "* Success. 
Detected dimensions of observational data: " + cat(paste0(message, paste(unlist(dims), collapse = ", "), + "\n")) + } + if (!(is.null(dim_obs) && is.null(dim_exp))) { + pointer_var_exp <- pointer_var_obs <- NULL + if (!is.null(dim_exp) && (length(unlist(dim_exp)) == + length(dim_exp)) && !any(is.na(unlist(dim_exp))) && + !any(unlist(dim_exp) == 0)) { + var_exp <- big.matrix(nrow = prod(unlist(dim_exp)), + ncol = 1) + pointer_var_exp <- describe(var_exp) + } + if (!is.null(dim_obs) && (length(unlist(dim_obs)) == + length(dim_obs)) && !any(is.na(unlist(dim_obs))) && + !any(unlist(dim_obs) == 0)) { + var_obs <- big.matrix(nrow = prod(unlist(dim_obs)), + ncol = 1) + pointer_var_obs <- describe(var_obs) + } + if (is.null(nprocs)) { + nprocs <- detectCores() + } + exp_work_piece_percent <- prod(dim_exp)/(prod(dim_obs) + + prod(dim_exp)) + obs_work_piece_percent <- prod(dim_obs)/(prod(dim_obs) + + prod(dim_exp)) + exp_work_pieces <- lapply(exp_work_pieces, function(x) c(x, + list(dataset_type = "exp", dims = dim_exp, out_pointer = pointer_var_exp))) + obs_work_pieces <- lapply(obs_work_pieces, function(x) c(x, + list(dataset_type = "obs", dims = dim_obs, out_pointer = pointer_var_obs))) + work_pieces <- c(exp_work_pieces, obs_work_pieces) + if (length(work_pieces)/nprocs >= 2 && !silent) { + if (length(work_pieces)/nprocs < 10) { + amount <- 100/ceiling(length(work_pieces)/nprocs) + reps <- ceiling(length(work_pieces)/nprocs) + } + else { + amount <- 10 + reps <- 10 + } + progress_steps <- rep(amount, reps) + if (length(exp_work_pieces) == 0) { + selected_exp_pieces <- c() + } + else if (length(exp_work_pieces) < floor(reps * + exp_work_piece_percent) + 1) { + selected_exp_pieces <- length(exp_work_pieces) + progress_steps <- c(sum(head(progress_steps, + floor(reps * exp_work_piece_percent))), tail(progress_steps, + ceiling(reps * obs_work_piece_percent))) + } + else { + selected_exp_pieces <- round(seq(1, length(exp_work_pieces), + length.out = floor(reps * exp_work_piece_percent) + + 1))[-1] + } + if (length(obs_work_pieces) == 0) { + selected_obs_pieces <- c() + } + else if (length(obs_work_pieces) < ceiling(reps * + obs_work_piece_percent) + 1) { + selected_obs_pieces <- length(obs_work_pieces) + progress_steps <- c(head(progress_steps, floor(reps * + exp_work_piece_percent)), sum(tail(progress_steps, + ceiling(reps * obs_work_piece_percent)))) + } + else { + selected_obs_pieces <- round(seq(1, length(obs_work_pieces), + length.out = ceiling(reps * obs_work_piece_percent) + + 1))[-1] + } + selected_pieces <- c(selected_exp_pieces, selected_obs_pieces + + length(exp_work_pieces)) + progress_steps <- paste0(" + ", round(progress_steps, + 2), "%") + progress_message <- "* Progress: 0%" + } + else { + progress_message <- "" + selected_pieces <- NULL + } + piece_counter <- 1 + step_counter <- 1 + work_pieces <- lapply(work_pieces, function(x) { + wp <- c(x, list(is_2d_var = is_2d_var, grid = grid, + remap = remap, lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), output = output, + remapcells = remapcells, single_dataset = single_dataset)) + if (piece_counter %in% selected_pieces) { + wp <- c(wp, list(progress_amount = progress_steps[step_counter])) + step_counter <<- step_counter + 1 + } + piece_counter <<- piece_counter + 1 + wp + }) + if (!silent) { + cat(paste("* Will now proceed to read and process ", + length(work_pieces), " data files:\n", sep = "")) + if (length(work_pieces) < 30) { + lapply(work_pieces, function(x) cat(paste("* ", + x[["filename"]], "\n", sep = ""))) + } + 
else { + cat(paste("* The list of files is long. You can check it after Load() finishes in the output '$source_files'.\n")) + } + if (length(dim_obs) == 0) { + bytes_obs <- 0 + obs_dim_sizes <- "0" + } + else { + bytes_obs <- prod(c(dim_obs, 8)) + obs_dim_sizes <- paste(na.omit(as.vector(dim_obs[c("dataset", + "member", "sdate", "time", "lat", "lon")])), + collapse = " x ") + } + if (length(dim_exp) == 0) { + bytes_exp <- 0 + exp_dim_sizes <- "0" + } + else { + bytes_exp <- prod(c(dim_exp, 8)) + exp_dim_sizes <- paste(na.omit(as.vector(dim_exp[c("dataset", + "member", "sdate", "time", "lat", "lon")])), + collapse = " x ") + } + cat(paste("* Total size of requested data: ", + bytes_obs + bytes_exp, "bytes.\n")) + cat(paste("* - Experimental data: (", exp_dim_sizes, + ") x 8 bytes =", bytes_exp, "bytes.\n")) + cat(paste("* - Observational data: (", obs_dim_sizes, + ") x 8 bytes =", bytes_obs, "bytes.\n")) + cat(paste("* If size of requested data is close to or above the free shared RAM memory, R will crash.\n")) + } + if (nprocs == 1) { + found_files <- lapply(work_pieces, .LoadDataFile, + silent = silent) + } + else { + cluster <- makeCluster(nprocs, outfile = "") + if (!silent) { + cat(paste("* Loading... This may take several minutes...\n", + sep = "")) + cat(progress_message) + } + work_errors <- try({ + found_files <- clusterApplyLB(cluster, work_pieces, + .LoadDataFile, silent = silent) + }) + stopCluster(cluster) + } + if (!silent) { + if (progress_message != "") { + cat("\n") + } + if (any(unlist(lapply(found_files, is.null)))) { + if (sum(unlist(lapply(found_files, is.null))) < + 30) { + cat("! WARNING: The following files were not found in the file system. Filling with NA values instead.\n") + lapply(work_pieces[which(unlist(lapply(found_files, + is.null)))], function(x) cat(paste("* ", + x[["filename"]], "\n", sep = ""))) + } + else { + cat("! WARNING: Some files were not found in the file system. The list is long. You can check it in the output '$not_found_files'. Filling with NA values instead.\n") + } + } + } + source_files <- unlist(found_files[which(!unlist(lapply(found_files, + is.null)))]) + not_found_files <- unlist(lapply(work_pieces[which(unlist(lapply(found_files, + is.null)))], "[[", "filename")) + } + else { + error_message <- "Error: No found files for any dataset. 
Check carefully the file patterns and correct either the pattern or the provided parameters:\n" + if (!is.null(exp)) { + lapply(exp, function(x) error_message <<- paste0(error_message, + paste0(x[["path"]], "\n"))) + } + if (!is.null(obs)) { + lapply(obs, function(x) error_message <<- paste0(error_message, + paste0(x[["path"]], "\n"))) + } + stop(error_message) + } + }) + if (class(errors) == "try-error") { + invisible(list(load_parameters = load_parameters)) + } + else { + variable <- list() + variable[["varName"]] <- var + variable[["level"]] <- NULL + attr(variable, "is_standard") <- FALSE + attr(variable, "units") <- units + attr(variable, "longname") <- var_long_name + attr(variable, "daily_agg_cellfun") <- "none" + attr(variable, "monthly_agg_cellfun") <- "none" + attr(variable, "verification_time") <- "none" + if (is.null(var_exp)) { + mod_data <- NULL + } + else { + dim_reorder <- length(dim_exp):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_exp + dim_exp <- dim_exp[dim_reorder] + mod_data <- aperm(array(bigmemory::as.matrix(var_exp), + dim = old_dims), dim_reorder) + attr(mod_data, "dimensions") <- names(dim_exp) + } + if (is.null(var_obs)) { + obs_data <- NULL + } + else { + dim_reorder <- length(dim_obs):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_obs + dim_obs <- dim_obs[dim_reorder] + obs_data <- aperm(array(bigmemory::as.matrix(var_obs), + dim = old_dims), dim_reorder) + attr(obs_data, "dimensions") <- names(dim_obs) + } + if (is.null(latitudes)) { + lat <- 0 + attr(lat, "cdo_grid_name") <- "none" + } + else { + lat <- latitudes + attr(lat, "cdo_grid_name") <- if (is.null(grid)) + "none" + else grid + } + attr(lat, "projection") <- "none" + if (is.null(longitudes)) { + lon <- 0 + attr(lon, "cdo_grid_name") <- "none" + } + else { + lon <- longitudes + attr(lon, "cdo_grid_name") <- if (is.null(grid)) + "none" + else grid + } + attr(lon, "projection") <- "none" + dates <- list() + dates[["start"]] <- NULL + dates[["end"]] <- NULL + models <- NULL + if (length(exp) > 0 && !is.null(dim_exp)) { + models <- list() + for (jmod in 1:length(exp)) { + models[[exp[[jmod]][["name"]]]] <- list(members = paste0("Member_", + 1:nmember[jmod]), source = if ((nchar(exp[[jmod]][["path"]]) - + nchar(gsub("/", "", exp[[jmod]][["path"]])) > + 2) && (length(sdates) > 1 && !is_file_per_member_exp[jmod])) { + parts <- strsplit(exp[[jmod]][["path"]], "/")[[1]] + paste(parts[-length(parts)], sep = "", collapse = "/") + } else { + exp[[jmod]][["path"]] + }) + } + } + observations <- NULL + if (length(obs) > 0 && !is.null(dim_obs)) { + observations <- list() + for (jobs in 1:length(obs)) { + observations[[obs[[jobs]][["name"]]]] <- list(members = paste0("Member_", + 1:nmemberobs[jobs]), source = if ((nchar(obs[[jobs]][["path"]]) - + nchar(gsub("/", "", obs[[jobs]][["path"]])) > + 2) && !is_file_per_dataset_obs[jobs]) { + parts <- strsplit(obs[[jobs]][["path"]], "/")[[1]] + paste(parts[-length(parts)], sep = "", collapse = "/") + } else { + obs[[jobs]][["path"]] + }) + } + } + invisible(list(mod = mod_data, obs = obs_data, lon = lon, + lat = lat, Variable = variable, Datasets = list(exp = models, + obs = observations), Dates = dates, InitializationDates = lapply(sdates, + function(x) { + sink("/dev/null") + date <- print(as.POSIXct(as.Date(x, format = "%Y%m%d"))) + sink() + date + }), when = Sys.time(), source_files = source_files, + not_found_files = not_found_files, load_parameters = load_parameters)) + } +} + +mergePDF <- function(..., file, gsversion = NULL, in.file = NULL) 
{ + if (is.null(in.file)) { + in.file <- substitute(...()) + } + infiles <- paste(unlist(lapply(in.file, function(y) as.character(y))), + collapse = " ") + if (is.null(gsversion)) { + gsversion <- names(which(Sys.which(c("gs", "gswin32c", "gswin64c")) != "")) + if (length(gsversion) == 0) + stop("Please install Ghostscript and ensure it is in your PATH") + if (length(gsversion) > 1) + stop("More than one Ghostscript executable was found:", + paste(gsversion, collapse = " "), + ". Please specify which version should be used with the gsversion argument") + } + pre = " -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=" + system(paste(paste(gsversion, pre, file, sep = ""), infiles, collapse = " ")) +} diff --git a/Utils.R b/Utils.R new file mode 100644 index 0000000000000000000000000000000000000000..0e951f2c7a47e3d219952b02bdb6ee983ac64a91 --- /dev/null +++ b/Utils.R @@ -0,0 +1,909 @@ +## Function to tell if a regexpr() match is a complete match to a specified name +.IsFullMatch <- function(x, name) { + ifelse(x > 0 && attributes(x)$match.length == nchar(name), TRUE, FALSE) +} + +.ConfigReplaceVariablesInString <- function(string, replace_values, allow_undefined_key_vars = FALSE) { + # This function replaces all the occurrences of a variable in a string by + # their corresponding string stored in the replace_values. + if (length(strsplit(string, "\\$")[[1]]) > 1) { + parts <- strsplit(string, "\\$")[[1]] + output <- "" + i <- 0 + for (part in parts) { + if (i %% 2 == 0) { + output <- paste(output, part, sep = "") + } else { + if (part %in% names(replace_values)) { + output <- paste(output, .ConfigReplaceVariablesInString(replace_values[[part]], replace_values, allow_undefined_key_vars), sep = "") + } else if (allow_undefined_key_vars) { + output <- paste0(output, "$", part, "$") + } else { + stop(paste('Error: The variable $', part, '$ was not defined in the configuration file.', sep = '')) + } + } + i <- i + 1 + } + output + } else { + string + } +} + +.LoadDataFile <- function(work_piece, explore_dims = FALSE, silent = FALSE) { + # The purpose, working modes, inputs and outputs of this function are + # explained in ?LoadDataFile + #suppressPackageStartupMessages({library(ncdf4)}) + #suppressPackageStartupMessages({library(bigmemory)}) + #suppressPackageStartupMessages({library(plyr)}) + # Auxiliar function to convert array indices to lineal indices + arrayIndex2VectorIndex <- function(indices, dims) { + if (length(indices) > length(dims)) { + stop("Error: indices do not match dimensions in arrayIndex2VectorIndex.") + } + position <- 1 + dims <- rev(dims) + indices <- rev(indices) + for (i in 1:length(indices)) { + position <- position + (indices[i] - 1) * prod(dims[-c(1:i)]) + } + position + } + .t2nlatlon <- function(t) { + ## As seen in cdo's griddes.c: ntr2nlat() + nlats <- (t * 3 + 1) / 2 + if ((nlats > 0) && (nlats - trunc(nlats) >= 0.5)) { + nlats <- ceiling(nlats) + } else { + nlats <- round(nlats) + } + if (nlats %% 2 > 0) { + nlats <- nlats + 1 + } + ## As seen in cdo's griddes.c: compNlon(), and as specified in ECMWF + nlons <- 2 * nlats + keep_going <- TRUE + while (keep_going) { + n <- nlons + if (n %% 8 == 0) n <- trunc(n / 8) + while (n %% 6 == 0) n <- trunc(n / 6) + while (n %% 5 == 0) n <- trunc(n / 5) + while (n %% 4 == 0) n <- trunc(n / 4) + while (n %% 3 == 0) n <- trunc(n / 3) + if (n %% 2 == 0) n <- trunc(n / 2) + if (n <= 8) { + keep_going <- FALSE + } else { + nlons <- nlons + 2 + if (nlons > 9999) { + stop("Error: pick another gaussian grid truncation. 
It doesn't fulfill the standards to apply FFT.") + } + } + } + c(nlats, nlons) + } + + .nlat2t <- function(nlats) { + trunc((nlats * 2 - 1) / 3) + } + + found_file <- NULL + dims <- NULL + grid_name <- units <- var_long_name <- is_2d_var <- NULL + + filename <- work_piece[['filename']] + namevar <- work_piece[['namevar']] + output <- work_piece[['output']] + # The names of all data files in the directory of the repository that match + # the pattern are obtained. + if (length(grep("^http", filename)) > 0) { + is_url <- TRUE + files <- filename + ## TODO: Check that the user is not using shell globbing exps. + } else { + is_url <- FALSE + files <- Sys.glob(filename) + } + + # If we don't find any, we leave the flag 'found_file' with a NULL value. + if (length(files) > 0) { + # The first file that matches the pattern is chosen and read. + filename <- files[length(files)] + filein <- filename + found_file <- filename + mask <- work_piece[['mask']] + + if (!silent) { + if (explore_dims) { + cat(paste("* Exploring dimensions...", filename, '\n')) + } + ##} else { + ## cat(paste("* Reading & processing data...", filename, '\n')) + ##} + } + + # We will fill in 'expected_dims' with the names of the expected dimensions of + # the data array we'll retrieve from the file. + expected_dims <- NULL + remap_needed <- FALSE + # But first we open the file and work out whether the requested variable is 2d + fnc <- nc_open(filein) + if (!(namevar %in% names(fnc$var))) { + stop(paste("Error: The variable", namevar, "is not defined in the file", filename)) + } + var_long_name <- fnc$var[[namevar]]$longname + units <- fnc$var[[namevar]]$units + if (is.null(work_piece[['is_2d_var']])) { + is_2d_var <- all(c(work_piece[['dimnames']][['lon']], + work_piece[['dimnames']][['lat']]) %in% + unlist(lapply(fnc$var[[namevar]][['dim']], + '[[', 'name'))) + } else { + is_2d_var <- work_piece[['is_2d_var']] + } + if ((is_2d_var || work_piece[['is_file_per_dataset']]) && (Sys.which("cdo")[[1]] == "")) { + stop("Error: CDO libraries not available") + } + # If the variable to load is 2-d, we need to determine whether: + # - interpolation is needed + # - subsetting is requested + if (is_2d_var) { + ## We read the longitudes and latitudes from the file. + lon <- ncvar_get(fnc, work_piece[['dimnames']][['lon']]) + lat <- ncvar_get(fnc, work_piece[['dimnames']][['lat']]) + # If a common grid is requested or we are exploring the file dimensions + # we need to read the grid type and size of the file to finally work out the + # CDO grid name. 
+ if (!is.null(work_piece[['grid']]) || explore_dims) { + # Here we read the grid type and its number of longitudes and latitudes + file_info <- system(paste('cdo -s griddes', filein, '2> /dev/null'), intern = TRUE) + grids_positions <- grep('# gridID', file_info) + grids_first_lines <- grids_positions + 2 + grids_last_lines <- c((grids_positions - 2)[-1], length(file_info)) + grids_info <- as.list(1:length(grids_positions)) + grids_info <- lapply(grids_info, function (x) file_info[grids_first_lines[x]:grids_last_lines[x]]) + grids_info <- lapply(grids_info, function (x) gsub(" *", " ", x)) + grids_info <- lapply(grids_info, function (x) gsub("^ | $", "", x)) + grids_info <- lapply(grids_info, function (x) unlist(strsplit(x, " | = "))) + grids_types <- unlist(lapply(grids_info, function (x) x[grep('gridtype', x) + 1])) + grids_matches <- unlist(lapply(grids_info, function (x) { + nlons <- if (length(grep('xsize', x)) > 0) { + as.integer(x[grep('xsize', x) + 1]) + } else { + NA + } + nlats <- if (length(grep('ysize', x)) > 0) { + as.integer(x[grep('ysize', x) + 1]) + } else { + NA + } + if (identical(nlons, length(lon)) && + identical(nlats, length(lat))) { + TRUE + } else { + FALSE + } + })) + grids_matches <- grids_matches[which(grids_types %in% c('gaussian', 'lonlat'))] + grids_info <- grids_info[which(grids_types %in% c('gaussian', 'lonlat'))] + grids_types <- grids_types[which(grids_types %in% c('gaussian', 'lonlat'))] + if (length(grids_matches) == 0) { + stop("Error: Only 'gaussian' and 'lonlat' grids supported. See e.g: cdo sinfo ", filename) + } + if (sum(grids_matches) > 1) { + if ((all(grids_types[which(grids_matches)] == 'gaussian') || + all(grids_types[which(grids_matches)] == 'lonlat')) && + all(unlist(lapply(grids_info[which(grids_matches)], identical, + grids_info[which(grids_matches)][[1]])))) { + grid_type <- grids_types[which(grids_matches)][1] + } else { + stop("Error: Load() can't disambiguate: More than one lonlat/gaussian grids with the same size as the requested variable defined in ", filename) + } + } else { + grid_type <- grids_types[which(grids_matches)] + } + grid_lons <- length(lon) + grid_lats <- length(lat) + # Convert to CDO grid name as seen in cdo's griddes.c: nlat2ntr() + if (grid_type == 'lonlat') { + grid_name <- paste0('r', grid_lons, 'x', grid_lats) + } else { + grid_name <- paste0('t', .nlat2t(grid_lats), 'grid') + } + } + # If a common grid is requested, we will also calculate its size which we will use + # later on. + if (!is.null(work_piece[['grid']])) { + # Now we calculate the common grid type and its lons and lats + if (length(grep('^t\\d{1,+}grid$', work_piece[['grid']])) > 0) { + common_grid_type <- 'gaussian' + common_grid_res <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,+}')[[1]][2]) + nlonlat <- .t2nlatlon(common_grid_res) + common_grid_lats <- nlonlat[1] + common_grid_lons <- nlonlat[2] + } else if (length(grep('^r\\d{1,+}x\\d{1,+}$', work_piece[['grid']])) > 0) { + common_grid_type <- 'lonlat' + common_grid_lons <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,+}')[[1]][2]) + common_grid_lats <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,+}')[[1]][3]) + } else { + stop("Error: Only supported grid types in parameter 'grid' are tgrid and rx") + } + } else { + ## If no 'grid' is specified, there is no common grid. + ## But these variables are filled in for consistency in the code. 
+ common_grid_lons <- length(lon) + common_grid_lats <- length(lat) + } + first_common_grid_lon <- 0 + last_common_grid_lon <- 360 - 360/common_grid_lons + ## This is not true for gaussian grids or for some regular grids, but + ## is a safe estimation + first_common_grid_lat <- -90 + last_common_grid_lat <- 90 + # And finally determine whether interpolation is needed or not + remove_shift <- FALSE + if (!is.null(work_piece[['grid']])) { + if ((grid_lons != common_grid_lons) || + (grid_lats != common_grid_lats) || + (grid_type != common_grid_type) || + ((lon[1] != first_common_grid_lon) + && !work_piece[['single_dataset']])) { + if (grid_lons == common_grid_lons && grid_lats == common_grid_lats && + grid_type == common_grid_type && lon[1] != first_common_grid_lon && + !work_piece[['single_dataset']]) { + remove_shift <- TRUE + } + remap_needed <- TRUE + common_grid_name <- work_piece[['grid']] + } + } else if ((lon[1] != first_common_grid_lon) && explore_dims && + !work_piece[['single_dataset']]) { + remap_needed <- TRUE + common_grid_name <- grid_name + remove_shift <- TRUE + } + if (remove_shift && !explore_dims) { + if (!is.null(work_piece[['progress_amount']])) { + cat("\n") + } + cat(paste0("! Warning: The dataset with index ", + tail(work_piece[['indices']], 1), " in '", + work_piece[['dataset_type']], "' doesn't start at longitude 0 and will be re-interpolated in order to align its longitudes with the standard CDO grids definable with the names 'tgrid' or 'rx', which are by definition starting at the longitude 0.\n")) + if (!is.null(mask)) { + cat(paste0("! Warning: A mask was provided for the dataset with index ", + tail(work_piece[['indices']], 1), " in '", + work_piece[['dataset_type']], "'. This dataset has been re-interpolated to align its longitudes to start at 0. You must re-interpolate the corresponding mask to align its longitudes to start at 0 as well, if you haven't done so yet. Running cdo remapcon,", common_grid_name, " original_mask_file.nc new_mask_file.nc will fix it.\n")) + } + } + # Now calculate if the user requests for a lonlat subset or for the + # entire field + lonmin <- work_piece[['lon_limits']][1] + lonmax <- work_piece[['lon_limits']][2] + latmin <- work_piece[['lat_limits']][1] + latmax <- work_piece[['lat_limits']][2] + lonlat_subsetting_requested <- FALSE + if (lonmin <= lonmax) { + if ((lonmin > first_common_grid_lon) || (lonmax < last_common_grid_lon)) { + lonlat_subsetting_requested <- TRUE + } + } else { + if ((lonmin - lonmax) > 360/common_grid_lons) { + lonlat_subsetting_requested <- TRUE + } else { + gap_width <- floor(lonmin / (360/common_grid_lons)) - + floor(lonmax / (360/common_grid_lons)) + if (gap_width > 0) { + if (!(gap_width == 1 && (lonmin %% (360/common_grid_lons) == 0) && + (lonmax %% (360/common_grid_lons) == 0))) { + lonlat_subsetting_requested <- TRUE + } + } + } + } + if ((latmin > first_common_grid_lat) || (latmax < last_common_grid_lat)) { + lonlat_subsetting_requested <- TRUE + } + + # When remap is needed but no subsetting, the file is copied locally + # so that cdo works faster, and then interpolated. + # Otherwise the file is kept as is and the subset will have to be + # interpolated still. 
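+    # For reference, the regridding branch below boils down to a CDO call like:
+    #   cdo -s remapcon,r256x128 -selname,psl input.nc regridded.nc
+    # where 'remapcon' stands for work_piece[['remap']] and 'r256x128' for
+    # common_grid_name (both illustrative values, not fixed choices).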
+ if (!lonlat_subsetting_requested && remap_needed) { + nc_close(fnc) + filecopy <- tempfile(pattern = "load", fileext = ".nc") + file.copy(filein, filecopy) + filein <- tempfile(pattern = "loadRegridded", fileext = ".nc") + system(paste0("cdo -s ", work_piece[['remap']], ",", + common_grid_name, + " -selname,", namevar, " ", filecopy, " ", filein, + " 2>/dev/null", sep = "")) + file.remove(filecopy) + work_piece[['dimnames']][['lon']] <- 'lon' + work_piece[['dimnames']][['lat']] <- 'lat' + fnc <- nc_open(filein) + lon <- ncvar_get(fnc, work_piece[['dimnames']][['lon']]) + lat <- ncvar_get(fnc, work_piece[['dimnames']][['lat']]) + } + + # Read and check also the mask + if (!is.null(mask)) { + ###mask_file <- tempfile(pattern = 'loadMask', fileext = '.nc') + if (is.list(mask)) { + if (!file.exists(mask[['path']])) { + stop(paste("Error: Couldn't find the mask file", mask[['path']])) + } + mask_file <- mask[['path']] + ###file.copy(work_piece[['mask']][['path']], mask_file) + fnc_mask <- nc_open(mask_file) + vars_in_mask <- sapply(fnc_mask$var, '[[', 'name') + if ('nc_var_name' %in% names(mask)) { + if (!(mask[['nc_var_name']] %in% + vars_in_mask)) { + stop(paste("Error: couldn't find variable", mask[['nc_var_name']], + "in the mask file", mask[['path']])) + } + } else { + if (length(vars_in_mask) != 1) { + stop(paste("Error: one and only one non-coordinate variable should be defined in the mask file", + mask[['path']], "if the component 'nc_var_name' is not specified. Currently found: ", + paste(vars_in_mask, collapse = ', '), ".")) + } else { + mask[['nc_var_name']] <- vars_in_mask + } + } + if (sum(fnc_mask$var[[mask[['nc_var_name']]]]$size > 1) != 2) { + stop(paste0("Error: the variable '", + mask[['nc_var_name']], + "' must be defined only over the dimensions '", + work_piece[['dimnames']][['lon']], "' and '", + work_piece[['dimnames']][['lat']], + "' in the mask file ", + mask[['path']])) + } + mask <- ncvar_get(fnc_mask, mask[['nc_var_name']], collapse_degen = TRUE) + nc_close(fnc_mask) + ### mask_lon <- ncvar_get(fnc_mask, work_piece[['dimnames']][['lon']]) + ### mask_lat <- ncvar_get(fnc_mask, work_piece[['dimnames']][['lat']]) + ###} else { + ### dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", lon) + ### dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", lat) + ### ncdf_var <- ncvar_def('LSM', "", list(dim_longitudes, dim_latitudes), NA, 'double') + ### fnc_mask <- nc_create(mask_file, list(ncdf_var)) + ### ncvar_put(fnc_mask, ncdf_var, work_piece[['mask']]) + ### nc_close(fnc_mask) + ### fnc_mask <- nc_open(mask_file) + ### work_piece[['mask']] <- list(path = mask_file, nc_var_name = 'LSM') + ### mask_lon <- lon + ### mask_lat <- lat + ###} + ###} + ### Now ready to check that the mask is right + ##if (!(lonlat_subsetting_requested && remap_needed)) { + ### if ((dim(mask)[2] != length(lon)) || (dim(mask)[1] != length(lat))) { + ### stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. 
For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + ### } + ###if (!(identical(mask_lon, lon) && identical(mask_lat, lat))) { + ### stop(paste0("Error: the longitudes and latitudes in the masks must be identical to the ones in the corresponding data files if output = 'areave' or, if the selected output is 'lon', 'lat' or 'lonlat', the longitudes in the mask file must start by 0 and the latitudes must be ordered from highest to lowest. See\n ", + ### work_piece[['mask']][['path']], " and ", filein)) + ###} + } + } + + lon_indices <- 1:length(lon) + if (!(lonlat_subsetting_requested && remap_needed)) { + lon[which(lon < 0)] <- lon[which(lon < 0)] + 360 + } + if (lonmax >= lonmin) { + lon_indices <- lon_indices[which(((lon %% 360) >= lonmin) & ((lon %% 360) <= lonmax))] + } else if (!remap_needed) { + lon_indices <- lon_indices[which(((lon %% 360) <= lonmax) | ((lon %% 360) >= lonmin))] + } + lat_indices <- which(lat >= latmin & lat <= latmax) + ## In most of the cases the latitudes are ordered from -90 to 90. + ## We will reorder them to be in the order from 90 to -90, so mostly + ## always the latitudes are reordered. + ## TODO: This could be avoided in future. + if (lat[1] < lat[length(lat)]) { + lat_indices <- lat_indices[length(lat_indices):1] + } + if (!is.null(mask) && !(lonlat_subsetting_requested && remap_needed)) { + if ((dim(mask)[1] != length(lon)) || (dim(mask)[2] != length(lat))) { + stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. 
For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + } + mask <- mask[lon_indices, lat_indices] + } + ## If the user requests subsetting, we must extend the lon and lat limits if possible + ## so that the interpolation after is done properly + maximum_extra_points <- work_piece[['remapcells']] + if (lonlat_subsetting_requested && remap_needed) { + if ((maximum_extra_points > (head(lon_indices, 1) - 1)) || + (maximum_extra_points > (length(lon) - tail(lon_indices, 1)))) { + ## if the requested number of points goes beyond the left or right + ## sides of the map, we need to take the entire map so that the + ## interpolation works properly + lon_indices <- 1:length(lon) + } else { + extra_points <- min(maximum_extra_points, head(lon_indices, 1) - 1) + if (extra_points > 0) { + lon_indices <- c((head(lon_indices, 1) - extra_points):(head(lon_indices, 1) - 1), lon_indices) + } + extra_points <- min(maximum_extra_points, length(lon) - tail(lon_indices, 1)) + if (extra_points > 0) { + lon_indices <- c(lon_indices, (tail(lon_indices, 1) + 1):(tail(lon_indices, 1) + extra_points)) + } + } + min_lat_ind <- min(lat_indices) + max_lat_ind <- max(lat_indices) + extra_points <- min(maximum_extra_points, min_lat_ind - 1) + if (extra_points > 0) { + if (lat[1] < tail(lat, 1)) { + lat_indices <- c(lat_indices, (min_lat_ind - 1):(min_lat_ind - extra_points)) + } else { + lat_indices <- c((min_lat_ind - extra_points):(min_lat_ind - 1), lat_indices) + } + } + extra_points <- min(maximum_extra_points, length(lat) - max_lat_ind) + if (extra_points > 0) { + if (lat[1] < tail(lat, 1)) { + lat_indices <- c((max_lat_ind + extra_points):(max_lat_ind + 1), lat_indices) + } else { + lat_indices <- c(lat_indices, (max_lat_ind + 1):(max_lat_ind + extra_points)) + } + } + } + lon <- lon[lon_indices] + lat <- lat[lat_indices] + expected_dims <- c(work_piece[['dimnames']][['lon']], + work_piece[['dimnames']][['lat']]) + } else { + lon <- 0 + lat <- 0 + } + # We keep on filling the expected dimensions + var_dimnames <- unlist(lapply(fnc$var[[namevar]][['dim']], '[[', 'name')) + nmemb <- nltime <- NULL + ## Sometimes CDO renames 'members' dimension to 'lev' + old_members_dimname <- NULL + if (('lev' %in% var_dimnames) && !(work_piece[['dimnames']][['member']] %in% var_dimnames)) { + old_members_dimname <- work_piece[['dimnames']][['member']] + work_piece[['dimnames']][['member']] <- 'lev' + } + if (work_piece[['dimnames']][['member']] %in% var_dimnames) { + nmemb <- fnc$var[[namevar]][['dim']][[match(work_piece[['dimnames']][['member']], var_dimnames)]]$len + expected_dims <- c(expected_dims, work_piece[['dimnames']][['member']]) + } else { + nmemb <- 1 + } + if (length(expected_dims) > 0) { + dim_matches <- match(expected_dims, var_dimnames) + if (any(is.na(dim_matches))) { + if (!is.null(old_members_dimname)) { + expected_dims[which(expected_dims == 'lev')] <- old_members_dimname + } + stop(paste("Error: the expected dimension(s)", + paste(expected_dims[which(is.na(dim_matches))], collapse = ', '), + "were not found in", filename)) + } + time_dimname <- var_dimnames[-dim_matches] + } else { + time_dimname <- var_dimnames + } + if (length(time_dimname) > 0) { + if (length(time_dimname) == 1) { + nltime <- fnc$var[[namevar]][['dim']][[match(time_dimname, var_dimnames)]]$len + expected_dims <- c(expected_dims, time_dimname) + dim_matches <- match(expected_dims, var_dimnames) + } else { + if (!is.null(old_members_dimname)) { + expected_dims[which(expected_dims == 'lev')] 
<- old_members_dimname + } + stop(paste("Error: the variable", namevar, + "is defined over more dimensions than the expected (", + paste(c(expected_dims, 'time'), collapse = ', '), + "). It could also be that the members dimension in 'dimnames' or in the configuration file is incorrect. If not, it could also be that the members dimension is named incorrectly. In that case, either rename the dimension in the file or adjust Load() to recognize this name with the parameter 'dimnames'. See file", filename)) + } + } else { + nltime <- 1 + } + + # Now we must retrieve the data from the file, but only the asked indices. + # So we build up the indices to retrieve. + # Longitudes or latitudes have been retrieved already. + if (explore_dims) { + # If we're exploring the file we only want one time step from one member, + # to regrid it and work out the number of longitudes and latitudes. + # We don't need more. + members <- 1 + ltimes_list <- list(c(1)) + } else { + # The data is arranged in the array 'tmp' with the dimensions in a + # common order: + # 1) Longitudes + # 2) Latitudes + # 3) Members (even if is not a file per member experiment) + # 4) Lead-times + if (work_piece[['is_file_per_dataset']]) { + time_indices <- 1:nltime + mons <- strsplit(system(paste('cdo showmon ', filein, + ' 2>/dev/null'), intern = TRUE), split = ' ') + years <- strsplit(system(paste('cdo showyear ', filein, + ' 2>/dev/null'), intern = TRUE), split = ' ') + mons <- as.integer(mons[[1]][which(mons[[1]] != "")]) + years <- as.integer(years[[1]][which(years[[1]] != "")]) + time_indices <- ts(time_indices, start = c(years[1], mons[1]), + end = c(years[length(years)], mons[length(mons)]), + frequency = 12) + ltimes_list <- list() + for (sdate in work_piece[['startdates']]) { + selected_time_indices <- window(time_indices, start = c(as.integer( + substr(sdate, 1, 4)), as.integer(substr(sdate, 5, 6))), + end = c(3000, 12), frequency = 12, extend = TRUE) + selected_time_indices <- selected_time_indices[work_piece[['leadtimes']]] + ltimes_list <- c(ltimes_list, list(selected_time_indices)) + } + } else { + ltimes <- work_piece[['leadtimes']] + #if (work_piece[['dataset_type']] == 'exp') { + ltimes_list <- list(ltimes[which(ltimes <= nltime)]) + #} + } + ## TODO: Put, when reading matrices, this kind of warnings + # if (nmember < nmemb) { + # cat("Warning: + members <- 1:work_piece[['nmember']] + members <- members[which(members <= nmemb)] + } + + # Now, for each list of leadtimes to load (usually only one list with all leadtimes), + # we'll join the indices and retrieve data + found_disordered_dims <- FALSE + for (ltimes in ltimes_list) { + if (is_2d_var) { + start <- c(min(lon_indices), min(lat_indices)) + end <- c(max(lon_indices), max(lat_indices)) + if (lonlat_subsetting_requested && remap_needed) { + subset_indices <- list(min(lon_indices):max(lon_indices) - min(lon_indices) + 1, + lat_indices - min(lat_indices) + 1) + dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", lon) + dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", lat) + ncdf_dims <- list(dim_longitudes, dim_latitudes) + } else { + subset_indices <- list(lon_indices - min(lon_indices) + 1, + lat_indices - min(lat_indices) + 1) + ncdf_dims <- list() + } + final_dims <- c(length(subset_indices[[1]]), length(subset_indices[[2]]), 1, 1) + } else { + start <- end <- c() + subset_indices <- list() + ncdf_dims <- list() + final_dims <- c(1, 1, 1, 1) + } + + if (work_piece[['dimnames']][['member']] %in% 
expected_dims) { + start <- c(start, head(members, 1)) + end <- c(end, tail(members, 1)) + subset_indices <- c(subset_indices, list(members - head(members, 1) + 1)) + dim_members <- ncdim_def(work_piece[['dimnames']][['member']], "", members) + ncdf_dims <- c(ncdf_dims, list(dim_members)) + final_dims[3] <- length(members) + } + if (time_dimname %in% expected_dims) { + if (any(!is.na(ltimes))) { + start <- c(start, head(ltimes[which(!is.na(ltimes))], 1)) + end <- c(end, tail(ltimes[which(!is.na(ltimes))], 1)) + subset_indices <- c(subset_indices, list(ltimes - head(ltimes[which(!is.na(ltimes))], 1) + 1)) + } else { + start <- c(start, NA) + end <- c(end, NA) + subset_indices <- c(subset_indices, list(ltimes)) + } + dim_time <- ncdim_def(time_dimname, "", 1:length(ltimes), unlim = TRUE) + ncdf_dims <- c(ncdf_dims, list(dim_time)) + final_dims[4] <- length(ltimes) + } + count <- end - start + 1 + start <- start[dim_matches] + count <- count[dim_matches] + subset_indices <- subset_indices[dim_matches] + # Now that we have the indices to retrieve, we retrieve the data + if (prod(final_dims) > 0) { + tmp <- take(ncvar_get(fnc, namevar, start, count, + collapse_degen = FALSE), + 1:length(subset_indices), subset_indices) + # The data is regridded if it corresponds to an atmospheric variable. When + # the chosen output type is 'areave' the data is not regridded to not + # waste computing time unless the user specified a common grid. + if (is_2d_var) { + ###if (!is.null(work_piece[['mask']]) && !(lonlat_subsetting_requested && remap_needed)) { + ### mask <- take(ncvar_get(fnc_mask, work_piece[['mask']][['nc_var_name']], + ### start[dim_matches[1:2]], count[dim_matches[1:2]], + ### collapse_degen = FALSE), 1:2, subset_indices[dim_matches[1:2]]) + ###} + if (lonlat_subsetting_requested && remap_needed) { + filein <- tempfile(pattern = "loadRegridded", fileext = ".nc") + filein2 <- tempfile(pattern = "loadRegridded2", fileext = ".nc") + ncdf_var <- ncvar_def(namevar, "", ncdf_dims[dim_matches], + fnc$var[[namevar]]$missval, + prec = if (fnc$var[[namevar]]$prec == 'int') { + 'integer' + } else { + fnc$var[[namevar]]$prec + }) + nc_close(fnc) + fnc <- nc_create(filein2, list(ncdf_var)) + ncvar_put(fnc, ncdf_var, tmp) + nc_close(fnc) + system(paste0("cdo -s -sellonlatbox,", if (lonmin > lonmax) { + "0,360," + } else { + paste0(lonmin, ",", lonmax, ",") + }, latmin, ",", latmax, + " -", work_piece[['remap']], ",", common_grid_name, + " ", filein2, " ", filein, " 2>/dev/null", sep = "")) + file.remove(filein2) + fnc <- nc_open(filein) + lon <- ncvar_get(fnc, 'lon') + lat <- ncvar_get(fnc, 'lat') + ## We read the longitudes and latitudes from the file. 
+ ## In principle cdo should put in order the longitudes + ## and slice them properly unless data is across greenwich + lon[which(lon < 0)] <- lon[which(lon < 0)] + 360 + lon_indices <- 1:length(lon) + if (lonmax < lonmin) { + lon_indices <- lon_indices[which((lon <= lonmax) | (lon >= lonmin))] + } + lat_indices <- 1:length(lat) + ## In principle cdo should put in order the latitudes + if (lat[1] < lat[length(lat)]) { + lat_indices <- length(lat):1 + } + final_dims[c(1, 2)] <- c(length(lon_indices), length(lat_indices)) + subset_indices[[dim_matches[1]]] <- lon_indices + subset_indices[[dim_matches[2]]] <- lat_indices + + tmp <- take(ncvar_get(fnc, namevar, collapse_degen = FALSE), + 1:length(subset_indices), subset_indices) + + if (!is.null(mask)) { + ## We create a very simple 2d netcdf file that is then interpolated to the common + ## grid to know what are the lons and lats of our slice of data + mask_file <- tempfile(pattern = 'loadMask', fileext = '.nc') + mask_file_remap <- tempfile(pattern = 'loadMask', fileext = '.nc') + dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", c(0, 360)) + dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", c(-90, 90)) + ncdf_var <- ncvar_def('LSM', "", list(dim_longitudes, dim_latitudes), NA, 'double') + fnc_mask <- nc_create(mask_file, list(ncdf_var)) + ncvar_put(fnc_mask, ncdf_var, array(rep(0, 4), dim = c(2, 2))) + nc_close(fnc_mask) + system(paste0("cdo -s ", work_piece[['remap']], ",", common_grid_name, + " ", mask_file, " ", mask_file_remap, " 2>/dev/null", sep = "")) + fnc_mask <- nc_open(mask_file_remap) + mask_lons <- ncvar_get(fnc_mask, 'lon') + mask_lats <- ncvar_get(fnc_mask, 'lat') + nc_close(fnc_mask) + file.remove(mask_file, mask_file_remap) + if ((dim(mask)[1] != common_grid_lons) || (dim(mask)[2] != common_grid_lats)) { + stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. 
For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + } + mask_lons[which(mask_lons < 0)] <- mask_lons[which(mask_lons < 0)] + 360 + if (lonmax >= lonmin) { + mask_lon_indices <- which((mask_lons >= lonmin) & (mask_lons <= lonmax)) + } else { + mask_lon_indices <- which((mask_lons >= lonmin) | (mask_lons <= lonmax)) + } + mask_lat_indices <- which((mask_lats >= latmin) & (mask_lats <= latmax)) + if (lat[1] < lat[length(lat)]) { + mask_lat_indices <- mask_lat_indices[length(mask_lat_indices):1] + } + mask <- mask[mask_lon_indices, mask_lat_indices] + } + lon <- lon[lon_indices] + lat <- lat[lat_indices] + ### nc_close(fnc_mask) + ### system(paste0("cdo -s -sellonlatbox,", if (lonmin > lonmax) { + ### "0,360," + ### } else { + ### paste0(lonmin, ",", lonmax, ",") + ### }, latmin, ",", latmax, + ### " -", work_piece[['remap']], ",", common_grid_name, + ###This is wrong: same files + ### " ", mask_file, " ", mask_file, " 2>/dev/null", sep = "")) + ### fnc_mask <- nc_open(mask_file) + ### mask <- take(ncvar_get(fnc_mask, work_piece[['mask']][['nc_var_name']], + ### collapse_degen = FALSE), 1:2, subset_indices[dim_matches[1:2]]) + ###} + } + } + if (!all(dim_matches == sort(dim_matches))) { + if (!found_disordered_dims && rev(work_piece[['indices']])[2] == 1 && rev(work_piece[['indices']])[3] == 1) { + found_disordered_dims <- TRUE + cat(paste0("! Warning: the dimensions for the variable ", namevar, " in the files of the experiment with index ", tail(work_piece[['indices']], 1), " are not in the optimal order for loading with Load(). The optimal order would be '", paste(expected_dims, collapse = ', '), "'. One of the files of the dataset is stored in ", filename)) + } + tmp <- aperm(tmp, dim_matches) + } + dim(tmp) <- final_dims + # If we are exploring the file we don't need to process and arrange + # the retrieved data. We only need to keep the dimension sizes. + if (explore_dims) { + if (work_piece[['is_file_per_member']]) { + ## TODO: When the exp_full_path contains asterisks and is file_per_member + ## members from different datasets may be accounted. + ## Also if one file member is missing the accounting will be wrong. + ## Should parse the file name and extract number of members. + if (is_url) { + nmemb <- NULL + } else { + nmemb <- length(files) + } + } + dims <- list(member = nmemb, time = nltime, lon = lon, lat = lat) + } else { + # If we are not exploring, then we have to process the retrieved data + if (is_2d_var) { + tmp <- apply(tmp, c(3, 4), function(x) { + # Disable of large values. 
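+              # (i.e. values outside work_piece[['var_limits']] are set to NA below;
+              # for example, with var_limits = c(0, 400) for a temperature field in
+              # Kelvin, a 1e30 fill value that escaped the NetCDF missing-value
+              # attribute would be masked out -- illustrative limits, not defaults)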
+ if (!is.na(work_piece[['var_limits']][2])) { + x[which(x > work_piece[['var_limits']][2])] <- NA + } + if (!is.na(work_piece[['var_limits']][1])) { + x[which(x < work_piece[['var_limits']][1])] <- NA + } + if (!is.null(mask)) { + x[which(mask < 0.5)] <- NA + } + + if (output == 'areave' || output == 'lon') { + weights <- InsertDim(cos(lat * pi / 180), 1, length(lon)) + weights[which(is.na(x))] <- NA + if (output == 'areave') { + weights <- weights / mean(weights, na.rm = TRUE) + mean(x * weights, na.rm = TRUE) + } else { + weights <- weights / InsertDim(Mean1Dim(weights, 2, narm = TRUE), 2, length(lat)) + Mean1Dim(x * weights, 2, narm = TRUE) + } + } else if (output == 'lat') { + Mean1Dim(x, 1, narm = TRUE) + } else if (output == 'lonlat') { + signif(x, 5) + } + }) + if (output == 'areave') { + dim(tmp) <- c(1, 1, final_dims[3:4]) + } else if (output == 'lon') { + dim(tmp) <- c(final_dims[1], 1, final_dims[3:4]) + } else if (output == 'lat') { + dim(tmp) <- c(1, final_dims[c(2, 3, 4)]) + } else if (output == 'lonlat') { + dim(tmp) <- final_dims + } + } + var_data <- attach.big.matrix(work_piece[['out_pointer']]) + if (work_piece[['dims']][['member']] > 1 && nmemb > 1 && + work_piece[['dims']][['time']] > 1 && + nltime < work_piece[['dims']][['time']]) { + work_piece[['indices']][2] <- work_piece[['indices']][2] - 1 + for (jmemb in members) { + work_piece[['indices']][2] <- work_piece[['indices']][2] + 1 + out_position <- arrayIndex2VectorIndex(work_piece[['indices']], work_piece[['dims']]) + out_indices <- out_position:(out_position + length(tmp[, , jmemb, ]) - 1) + var_data[out_indices] <- as.vector(tmp[, , jmemb, ]) + } + work_piece[['indices']][2] <- work_piece[['indices']][2] - tail(members, 1) + 1 + } else { + out_position <- arrayIndex2VectorIndex(work_piece[['indices']], work_piece[['dims']]) + out_indices <- out_position:(out_position + length(tmp) - 1) + a <- aperm(tmp, c(1, 2, 4, 3)) + as.vector(a) + var_data[out_indices] <- as.vector(aperm(tmp, c(1, 2, 4, 3))) + } + work_piece[['indices']][3] <- work_piece[['indices']][3] + 1 + } + } + } + nc_close(fnc) + if (is_2d_var && remap_needed) { + file.remove(filein) + ###if (!is.null(mask) && lonlat_subsetting_requested) { + ### file.remove(mask_file) + ###} + } + + } + if (explore_dims) { + list(dims = dims, is_2d_var = is_2d_var, grid = grid_name, units = units, + var_long_name = var_long_name) + } else { + ###if (!silent && !is.null(progress_connection) && !is.null(work_piece[['progress_amount']])) { + ### foobar <- writeBin(work_piece[['progress_amount']], progress_connection) + ###} + if (!silent && !is.null(work_piece[['progress_amount']])) { + cat(paste0(work_piece[['progress_amount']])) + } + found_file + } +} + +.LoadSampleData <- function(var, exp = NULL, obs = NULL, sdates, + nmember = NULL, nmemberobs = NULL, + nleadtime = NULL, leadtimemin = 1, + leadtimemax = NULL, storefreq = 'monthly', + sampleperiod = 1, lonmin = 0, lonmax = 360, + latmin = -90, latmax = 90, output = 'areave', + method = 'conservative', grid = NULL, + maskmod = vector("list", 15), + maskobs = vector("list", 15), + configfile = NULL, suffixexp = NULL, + suffixobs = NULL, varmin = NULL, varmax = NULL, + silent = FALSE, nprocs = NULL) { + ## This function loads and selects sample data stored in sampleMap and + ## sampleTimeSeries and is used in the examples instead of Load() so as + ## to avoid nco and cdo system calls and computation time in the stage + ## of running examples in the CHECK process on CRAN. 
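+  ## Hedged usage sketch (same interface as Load(), applied to the bundled
+  ## sample data; only the five start dates matched below are available):
+  ##   .LoadSampleData('tos', exp = c('experiment'), obs = c('observation'),
+  ##                   sdates = c('19851101', '19901101'), output = 'areave')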
+ selected_start_dates <- match(sdates, c('19851101', '19901101', '19951101', + '20001101', '20051101')) + start_dates_position <- 3 + lead_times_position <- 4 + + if (output == 'lonlat') { + sampleData <- s2dverification::sampleMap + if (is.null(leadtimemax)) { + leadtimemax <- dim(sampleData$mod)[lead_times_position] + } + selected_lead_times <- leadtimemin:leadtimemax + + dataOut <- sampleData + dataOut$mod <- sampleData$mod[, , selected_start_dates, selected_lead_times, , ] + dataOut$obs <- sampleData$obs[, , selected_start_dates, selected_lead_times, , ] + } + else if (output == 'areave') { + sampleData <- s2dverification::sampleTimeSeries + if (is.null(leadtimemax)) { + leadtimemax <- dim(sampleData$mod)[lead_times_position] + } + selected_lead_times <- leadtimemin:leadtimemax + + dataOut <- sampleData + dataOut$mod <- sampleData$mod[, , selected_start_dates, selected_lead_times] + dataOut$obs <- sampleData$obs[, , selected_start_dates, selected_lead_times] + } + + dims_out <- dim(sampleData$mod) + dims_out[start_dates_position] <- length(selected_start_dates) + dims_out[lead_times_position] <- length(selected_lead_times) + dim(dataOut$mod) <- dims_out + + dims_out <- dim(sampleData$obs) + dims_out[start_dates_position] <- length(selected_start_dates) + dims_out[lead_times_position] <- length(selected_lead_times) + dim(dataOut$obs) <- dims_out + + invisible(list(mod = dataOut$mod, obs = dataOut$obs, + lat = dataOut$lat, lon = dataOut$lon)) +} + +.ConfigGetDatasetInfo <- function(matching_entries, table_name) { + # This function obtains the information of a dataset and variable pair, + # applying all the entries that match in the configuration file. + if (table_name == 'experiments') { + id <- 'EXP' + } else { + id <- 'OBS' + } + defaults <- c(paste0('$DEFAULT_', id, '_MAIN_PATH$'), paste0('$DEFAULT_', id, '_FILE_PATH$'), '$DEFAULT_NC_VAR_NAME$', '$DEFAULT_SUFFIX$', '$DEFAULT_VAR_MIN$', '$DEFAULT_VAR_MAX$') + info <- NULL + + for (entry in matching_entries) { + if (is.null(info)) { + info <- entry[-1:-2] + info[which(info == '*')] <- defaults[which(info == '*')] + } else { + info[which(entry[-1:-2] != '*')] <- entry[-1:-2][which(entry[-1:-2] != '*')] + } + } + + info <- as.list(info) + names(info) <- c('main_path', 'file_path', 'nc_var_name', 'suffix', 'var_min', 'var_max') + info +} diff --git a/WT_drivers_v10.R b/WT_drivers_v10.R new file mode 100644 index 0000000000000000000000000000000000000000..2fbe250ee77626c2af66c4b4f93ccbbec0c7e806 --- /dev/null +++ b/WT_drivers_v10.R @@ -0,0 +1,817 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(abind) # for funcion abind() +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +# directory where to find the WT classification +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT" + +# reanalysis used for daily var data (must be the same as the mslp data): +var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +#var.rean <- list(path = '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +rean.name <- 'erai' #'ncep' #'erai' + +# any daily variable: +var.name='tas' #'sfcWind' #'prlr' +#var.name.file='Temperature' #'10-m Wind Speed' 
#'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the output map's title
+
+library(RColorBrewer) # for brewer.pal(), needed by the impact palettes below
+
+year.start <- 1985
+year.end <- 2014
+
+periods=1:17 # identify monthly (1=Jan to 12=Dec), seasonal (13=Winter to 16=Autumn) and yearly (17) values
+
+LOESS <- FALSE # LOESS climatology filter on/off (if off, a 5-day moving window is used to measure daily anomalies)
+
+rm.lat <- c(-12,12) # in case of WTs, remove these lat values from the maps
+
+##########################################################################################
+n.periods <- length(periods)
+n.years <- year.end - year.start + 1
+
+my.brks <- my.cols <- my.labels <- my.unit <- list()
+my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD
+my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1)
+my.unit.freq <- "%"
+
+if(var.name == 'tas') var.num <- 1
+if(var.name == 'sfcWind') var.num <- 2
+if(var.name == 'prlr') var.num <- 3
+
+WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A")
+WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)","Northerly (N)","Cyclonic (C)","Anticyclonic (A)")
+wd.dir <- c("N","NE","E","SE","S","SW","W","NW")
+n.wd <- length(wd.dir)
+
+# load one day of var data only to detect lat and lon values:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=1)
+var.lat <- var$lat # var lat and lon MUST be the same as the WT classification's, even if the latter can have NA for certain lat values.
+var.lon <- var$lon
+n.lat.var <- length(var.lat)
+n.lon.var <- length(var.lon)
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the number of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+l1 <- which(var.lat > rm.lat[1]) # lat values greater than -12
+l2 <- which(var.lat < rm.lat[2]) # lat values lower than 12
+pos.lat.unused <- intersect(l1,l2) # indices of the lat values between -12 and 12
+var.lat.unused <- var.lat[pos.lat.unused] # latitude values not visualized in the WT classification because too close to the equator
+
+##########################################################################################
+
+# save var anomalies:
+
+# load WT metadata to get the info on year.start and year.end
+#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years"))
+#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE)
+#load(paste0(workdir,"/txt/ERAint/metadata.RData"))
+
+# load daily var data for all years:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)
+
+# remove bisestile days from var to have all arrays with the same dimensions:
+cat("Removing bisestile days. 
Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + +# convert var to daily anomalies: +cat("Calculating anomalies. Please wait...\n") + +if(LOESS == TRUE){ + var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, year.end-year.start+1) + + rm(var365ClimLoess) + gc() + +} else { # apply a 5-days mobile window: + + #var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + var365Clim <- array(NA,c(365,dim(var365)[5:6])) + + var365x3 <- abind(var365[,,,364:365,,,drop=FALSE],var365[,,,,,,drop=FALSE],var365[,,,1:2,,,drop=FALSE],along=4) # add the two days before 1st january and after 31 dec + + for(d in 1:365){ + window <- var365x3[1,1,,2+d+seq(-2,2),,,drop=FALSE] + var365Clim[d,,] <- apply(window,c(5,6),mean,na.rm=TRUE) + } + + rm(var365x3, window) + gc() + + var365Clim <- InsertDim(var365Clim, 1, n.years) + +} + + +var365Anom <- var365[1,1,,,,] - var365Clim + +rm(var365Clim) +gc() + +rm(var365) +gc() + +# save var anomalies for retreiving them when necessary: +save(var365Anom, file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) + + + + +if(LOESS == 'test'){ + +# Compute wind directions: +uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs +vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs + +windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees; 90 is necessary to shift from trigonometric system to cardinal system and +180 to shift + # from the direction wind is blowing to the direction wind is coming from +rm(uas, vas) +gc() + +# remove bisestile days from windir, to compare it with var, which has no bisestiles: +cat("Removing bisestile days. 
Please wait...\n") +windir365 <- windir[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + + +rm(windir) +gc() + +dir2 <- which(windir365 > 22.5) +dir3 <- which(windir365 > 22.5 + 45) +dir4 <- which(windir365 > 22.5 + 90) +dir5 <- which(windir365 > 22.5 + 135) +dir6 <- which(windir365 > 22.5 + 180) +dir7 <- which(windir365 > 22.5 + 225) +dir8 <- which(windir365 > 22.5 + 270) +dir1 <- which(windir365 > 22.5 + 315) + +windirClass <- array(1, dim(windir365)) # N +windirClass[dir2] <- 2 # NE +windirClass[dir3] <- 3 # E +windirClass[dir4] <- 4 # SE +windirClass[dir5] <- 5 # S +windirClass[dir6] <- 6 # SW +windirClass[dir7] <- 7 # W +windirClass[dir8] <- 8 # NW +windirClass[dir1] <- 1 # N + +rm(windir365) +rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8) +gc() + +# save it once to retreive it later: +save(windirClass, file=paste0(workdir,"/",rean.name,"_windirClass.RData")) + + + + + +# Plot frequency maps: + +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +windirFreqInter <- array(NA, c(n.wd, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +dir.create(paste0(workdir,"/windir")) + +# save wind direction mean frequency maps: +for(p in periods){ + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from windPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + # mean frequency maps: + windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) + windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1)) + + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + # mean frequency maps for North Pole: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout) + + # measure the interannual frequency series of that wind direction (for each grid point): + windirFreqInter[wd,p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1) + #windirPeriod.wt[year,days,,] + + # plot the frequency maps for each year: + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png") + png(filename=my.fileout,width=900,height=600) + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreqInter[wd,p,y2,,lon.swapped], var.lon2, var.lat, 
filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + } # close for on y + + } # close for on wd + +} # close for on p + +save(windirFreqInter, file=paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction): + +# load var anomalies and wind direction array: +load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact +my.unit[[2]] <- "m/s" + +impact.var <- array(NA, c(n.wd, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each wd on var + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ # 1:N, 2=NE, 3=E, ... + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- array(NA, dim(windirPeriod)) + + ss <- which(windirPeriod == wd) + #pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + impact.var[wd,p,,] <- var.mean.wt + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + # + # same maps as above but removing points with frequency < 3%: + # + + 
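+    # (hedged numeric example: for the yearly period p = 17 (365 days) and the
+    # 30 years 1985-2014, the 3% threshold below masks grid points where this
+    # wind direction occurs on fewer than 0.03 * 30 * 365 = 328.5, i.e. ~329 days)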
windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1)) + + ss <- which(windirFreq.wt < 0.03) + pp <- which(windirFreq.wt >= 0.03) + + windirFreq.wt[ss] <- NA + windirFreq.wt[pp] <- 1 + + var.mean.wt2 <- var.mean.wt * windirFreq.wt + rm(ss,windirFreq.wt) + gc() + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt2,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + rm(var.mean.wt, var.mean.wt2) + + } # close for on wd + +} # close for on p + +save(impact.var, file=paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData")) + + + + +# visualize and save the reconstructed monthly/seasonal anomalies with the WDs: + +load(paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) +load(paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData")) + +impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact +my.unit[[2]] <- "m/s" + +for(p in periods){ + for(y in year.start:year.end){ + #p <- 10; y <- 2016 # for debugging + + #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,] + + #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + # windirFreqInter[,10,32,66,507] + #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645 + + #PlotEquiMap(test2[,lon.swapped], 
var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,] + #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,] + #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + y2 <- y - year.start + 1 + impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,] + impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T) + + my.fileout <- paste0(workdir,"/windir/",rean.name,"_reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png") + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + for(wd in 1:n.wd){ + my.fileout <- paste0(workdir,"/windir/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png") + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(rescale(impact.weighted[wd,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + } + + } # close for on y +} # close for on p + +save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData")) + + + + + + + + + + + + + + + + + + + + + + +# Plot WTs frequency maps: + +#load(paste0(workdir,"/",rean.name,"_windirClass.RData")) +#windirClass <- [wd,1,1,year,1:365,lat,lon] + +# load lat and lon from SLP grid: +load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used + +# load daily WT classification for all years and grid points and with the same format of var365Anom: +load(paste0(workdir,"/txt/",rean.name,"/WTs.RData")) + +WTs.code <- sort(unique(as.vector(WTs[1,1,,]))) +n.wt <- length(WTs.code) +WTs.name <- WTs.type[WTs.code] +wt.num <- c(1:9,NA,NA,NA,NA,NA,NA,NA,NA,10) + +windirFreqInter <- array(NA, c(n.wt, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +# save weather type mean frequency maps: +for(p in periods){ + # select weather type data only inside period p: + windirPeriod <- WTs[,pos.period(1,p),,] + + for(wt in WTs.code){ + + # remove from windPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- array(NA, dim(windirPeriod)) #windirPeriod + + ss <- which(windirPeriod == wt) + #pp <- which(windirPeriod != wt) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss) + gc() + + # mean frequency maps: + windirDays 
<- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) + windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1)) + + my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_frequency_",period.name[p],"_type_",WTs.name[wt.num[wt]],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + + windirFreq[pos.lat.unused,] <- NA + PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + # measure the interannual frequency series of that wind direction (for each grid point): + windirFreqInter[wt.num[wt],p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1) + #windirPeriod.wt[year,days,,] + + # plot the frequency maps for each year: + ## for(y in year.start:year.end){ + ## y2 <- y - year.start + 1 + ## my.fileout <- paste0(workdir,"/txt/",rean.name,"/frequency/",rean.name,"_frequency_",period.name[p],"_year_",y,"_type_",WTs.name[wt],".png") + + ## png(filename=my.fileout,width=900,height=600) + ## layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + + ## windirFreqInter[wt.num[wt],p,y2,pos.lat.unused,] <- NA + ## PlotEquiMap(windirFreqInter[wt.num[wt],p,y2,,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ## ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + ## dev.off() + + ## } # close for on y + + } # close for on wt + +} # close for on p + +save(windirFreqInter, file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_WTsFreqInter.RData")) + + + + + + + + + + + + + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction): + +# load var anomalies: +load(file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"365Anom.RData")) + +# load lat and lon from SLP grid: +load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used + +# only for compatibility with older versions (it should be already loaded): +#lat <- round(MSLP$lat,3) +#lon <- round(MSLP$lon,3) + +# load daily WT classification for all years and grid points and with the same format of var365Anom: +load(paste0(workdir,"/txt/",rean.name,"/WTs.RData")) + +WTs.code <- sort(unique(as.vector(WTs[1,1,,]))) +n.WTs <- length(WTs.code) +WTs.name <- WTs.type[WTs.code] +WTs.num <- c(1:9,NA,NA,NA,NA,NA,NA,NA,NA,10) + +## Load MDE and interpolate it to the same grid of var data: +library(ncdf4) +dem <- nc_open("/home/Earth/ncortesi/scripts/dem/elevation_512x256.nc") +dem.data <- ncvar_get(dem, dem$var$Band1) +dem.lat <- dem$dim$lat$vals +dem.lon <- dem$dim$lon$vals +names(dim(dem.data)) <- c('lon','lat') +dem.interp <- CDORemap(dem.data, dem.lon, dem.lat, 'r30x15', 'bil', crop=TRUE) + + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors 
for wind speed impact
+my.unit[[2]] <- "m/s"
+
+impact.var <- array(NA, c(n.WTs, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each WT on var
+
+for(p in periods){
+  # select var data only inside period p:
+  varPeriod <- var365Anom[,pos.period(1,p),,]
+
+  # select the WT data only inside period p:
+  windirPeriod <- WTs[,pos.period(1,p),,]
+
+  print(paste0("Period: ", period.name[p]))
+
+  for(wt in WTs.code){
+
+    # remove from varPeriod the days not belonging to that WT (setting their values to NA):
+    windirPeriod.wt <- array(NA, dim(windirPeriod))
+
+    ss <- which(windirPeriod == wt)
+    #pp <- which(windirPeriod != wt)
+
+    windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt
+    #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+    rm(ss)
+    gc()
+
+    varPeriod.wt <- varPeriod * windirPeriod.wt
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+    impact.var[WTs.num[wt],p,,] <- var.mean.wt
+
+    # visualize impact map of the WT on var:
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_impact_",var.name,"_",period.name[p],"_type_",WTs.name[WTs.num[wt]],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+
+    var.mean.wt[pos.lat.unused,] <- NA
+
+    # note: for the 'contours' overlay PlotEquiMap() expects a field with the same
+    # dimensions as the plotted one, so the DEM must be on the grid of var:
+    PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F, contours=dem.interp$data_array)
+
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+    dev.off()
+
+    ## windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1))
+
+    ## ss <- which(windirFreq.wt < 0.03)
+    ## pp <- which(windirFreq.wt >= 0.03)
+
+    ## windirFreq.wt[ss] <- NA
+    ## windirFreq.wt[pp] <- 1
+
+    ## var.mean.wt2 <- var.mean.wt * windirFreq.wt
+
+    ## rm(ss,windirFreq.wt)
+    ## gc()
+    rm(varPeriod.wt, windirPeriod.wt)
+
+  } # close for on wt
+
+  rm(varPeriod, windirPeriod)
+  gc()
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"_impact_WTs.RData"))
+
+
+
+
+# visualize and save the reconstructed monthly/seasonal anomalies with the WTs:
+
+# load the WT interannual frequencies and the WT impact maps computed above:
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_WTsFreqInter.RData"))
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"_impact_WTs.RData"))
+
+impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the reconstructed anomalies
+
+my.brks[[1]] <- seq(-10,10,1) # Mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1)
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+# visualize and save the reconstructed monthly/seasonal anomalies with the WTs:
+
+# load the interannual WT frequencies and the WT impact maps computed above:
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_WTsFreqInter.RData"))
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"_impact_WTs.RData"))
+
+impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the reconstructed anomalies
+
+my.brks[[1]] <- seq(-10,10,1) # mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1)
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue-white-red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+# make sure the output subdir exists before writing the maps into it:
+if(!dir.exists(file.path(workdir,"txt",rean.name,"reconstructed"))) dir.create(file.path(workdir,"txt",rean.name,"reconstructed"), recursive=TRUE)
+
+for(p in periods){
+  for(y in year.start:year.end){
+    #p <- 10; y <- 2016 # for debugging
+
+    #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,]
+
+    #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    # windirFreqInter[,10,32,66,507]
+    #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645
+
+    #PlotEquiMap(test2[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    y2 <- y - year.start + 1
+    impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,]
+    impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T)
+
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/reconstructed/reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png")
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+    impact.total[p,y2,pos.lat.unused,] <- NA
+
+    PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+
+    dev.off()
+
+    for(wt in 1:n.WTs){
+      my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_type_",WTs.name[wt],".png")
+
+      png(filename=my.fileout,width=900,height=600)
+
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      impact.weighted[wt,pos.lat.unused,] <- NA
+
+      PlotEquiMap(rescale(impact.weighted[wt,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+      ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+      dev.off()
+    }
+
+  } # close for on y
+} # close for on p
+
+save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData"))
+
+
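+# The reconstruction above is a frequency-weighted sum: for each year and grid point, the
+# anomaly is approximated as the sum over the WTs of (mean impact of the WT) x (frequency of
+# the WT that year). A toy check with assumed numbers (not from any dataset, not executed):
+if(FALSE){
+  impact.toy <- c(A=1.2, C=-0.8)   # hypothetical mean anomalies of two types
+  freq.toy <- c(A=0.7, C=0.3)      # hypothetical frequencies in one year
+  sum(impact.toy * freq.toy)       # 0.6: the reconstructed anomaly for that year
+}
+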
+# Create and save monthly/seasonal/yearly climatology maps of var:
+
+#p=17 # for the debug
+
+# Map intervals and colors:
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors for precipitation
+my.cols[[3]] <- my.cols[[2]] # assumption: reuse the same palette for the wind speed climatology (no palette was defined for var.num == 3)
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+mod.var <- 0
+if(var.num == 1) mod.var <- -273.15 # for temperature, convert the climatology from Kelvin to degrees Celsius
+
+for(p in periods){
+  # Select only days of the chosen month/season:
+  varPeriod <- var365[,pos.period(1,p),,,drop=FALSE]
+  varPeriodMean <- apply(varPeriod,c(3,4),mean,na.rm=TRUE)
+
+  png(filename=paste0(workdir,"/",var.name,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+  #PlotEquiMap(varPeriodMean+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+  PlotEquiMap(varPeriodMean[,lon.swapped]+mod.var, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F)
+  ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
+
+
+
+} # close test
diff --git a/WT_drivers_v10.R~ b/WT_drivers_v10.R~
new file mode 100644
index 0000000000000000000000000000000000000000..97421ef86f47edc547a14df01ffc8fa2c52c5a0e
--- /dev/null
+++ b/WT_drivers_v10.R~
@@ -0,0 +1,808 @@
+##########################################################################################
+#            Relationship between WTs and a chosen climate variable                     #
+##########################################################################################
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind) # for the function abind()
+library(RColorBrewer) # assumption: brewer.pal() is called below, so RColorBrewer must be attached
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+# directory where to find the WT classification
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT"
+
+# reanalysis used for daily var data (must be the same as the mslp data):
+var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+#var.rean <- list(path = '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+rean.name <- 'erai' #'ncep' #'erai'
+
+# any daily variable:
+var.name='tas' #'sfcWind' #'prlr'
+#var.name.file='Temperature' #'10-m Wind Speed' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the output map's title
+
+year.start <- 1985
+year.end <- 2014
+
+periods=1:17 # identify monthly (1=Jan to 12=Dec), seasonal (13=winter to 16=autumn) and yearly (17) values
+
+LOESS <- FALSE # LOESS climatology filter on/off (if off, a 5-day moving window is used to measure daily anomalies)
+
+rm.lat <- c(-12,12) # in case of WTs, remove
these lat values from the maps + +########################################################################################## +n.periods <- length(periods) +n.years <- year.end - year.start + 1 + +my.brks <- my.cols <- my.labels <- my.unit <- list() +my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD +my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1) +my.unit.freq <- "%" + +if(var.name == 'tas') var.num <- 1 +if(var.name == 'sfcWind') var.num <- 2 +if(var.name == 'prlr') var.num <- 3 + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)","Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +wd.dir <- c("N","NE","E","SE","S","SW","W","NW") +n.wd <- length(wd.dir) + +# load one day of var data only to detect lat and lon values: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=1) +var.lat <- var$lat # var lat and lon MUST be the same of WT classification, even if the latter can have NA for certain lat values. +var.lon <- var$lon +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +l1 <- which(var.lat > rm.lat[1]) # lat values greater than -12 +l2 <- which(var.lat < rm.lat[2]) # lat values lower than 12 +pos.lat.unused <- which(!is.na(match(l1,l2))) +var.lat.unused <- var.lat[pos.lat.unused] # latitude values not visualized in the WT classification because too close to equator + +########################################################################################## + +# save var anomalies: + +# load WT metadata to get the info on year.start and year.end +#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) +#load(paste0(workdir,"/txt/ERAint/metadata.RData")) + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8) + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + +# convert var to daily anomalies: +cat("Calculating anomalies. 
Please wait...\n") + +if(LOESS == TRUE){ + var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, year.end-year.start+1) + + rm(var365ClimLoess) + gc() + +} else { # apply a 5-days mobile window: + + #var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + var365Clim <- array(NA,c(365,dim(var365)[5:6])) + + var365x3 <- abind(var365[,,,364:365,,,drop=FALSE],var365[,,,,,,drop=FALSE],var365[,,,1:2,,,drop=FALSE],along=4) # add the two days before 1st january and after 31 dec + + for(d in 1:365){ + window <- var365x3[1,1,,2+d+seq(-2,2),,,drop=FALSE] + var365Clim[d,,] <- apply(window,c(5,6),mean,na.rm=TRUE) + } + + rm(var365x3, window) + gc() + + var365Clim <- InsertDim(var365Clim, 1, n.years) + +} + + +var365Anom <- var365[1,1,,,,] - var365Clim + +rm(var365Clim) +gc() + +rm(var365) +gc() + +# save var anomalies for retreiving them when necessary: +save(var365Anom, file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) + + + + +if(LOESS == 'test'){ + +# Compute wind directions: +uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs +vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs + +windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees; 90 is necessary to shift from trigonometric system to cardinal system and +180 to shift + # from the direction wind is blowing to the direction wind is coming from +rm(uas, vas) +gc() + +# remove bisestile days from windir, to compare it with var, which has no bisestiles: +cat("Removing bisestile days. 
Please wait...\n") +windir365 <- windir[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + + +rm(windir) +gc() + +dir2 <- which(windir365 > 22.5) +dir3 <- which(windir365 > 22.5 + 45) +dir4 <- which(windir365 > 22.5 + 90) +dir5 <- which(windir365 > 22.5 + 135) +dir6 <- which(windir365 > 22.5 + 180) +dir7 <- which(windir365 > 22.5 + 225) +dir8 <- which(windir365 > 22.5 + 270) +dir1 <- which(windir365 > 22.5 + 315) + +windirClass <- array(1, dim(windir365)) # N +windirClass[dir2] <- 2 # NE +windirClass[dir3] <- 3 # E +windirClass[dir4] <- 4 # SE +windirClass[dir5] <- 5 # S +windirClass[dir6] <- 6 # SW +windirClass[dir7] <- 7 # W +windirClass[dir8] <- 8 # NW +windirClass[dir1] <- 1 # N + +rm(windir365) +rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8) +gc() + +# save it once to retreive it later: +save(windirClass, file=paste0(workdir,"/",rean.name,"_windirClass.RData")) + + + + + +# Plot frequency maps: + +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +windirFreqInter <- array(NA, c(n.wd, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +dir.create(paste0(workdir,"/windir")) + +# save wind direction mean frequency maps: +for(p in periods){ + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from windPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + # mean frequency maps: + windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) + windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1)) + + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + # mean frequency maps for North Pole: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout) + + # measure the interannual frequency series of that wind direction (for each grid point): + windirFreqInter[wd,p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1) + #windirPeriod.wt[year,days,,] + + # plot the frequency maps for each year: + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png") + png(filename=my.fileout,width=900,height=600) + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreqInter[wd,p,y2,,lon.swapped], var.lon2, var.lat, 
filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + } # close for on y + + } # close for on wd + +} # close for on p + +save(windirFreqInter, file=paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction): + +# load var anomalies and wind direction array: +load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact +my.unit[[2]] <- "m/s" + +impact.var <- array(NA, c(n.wd, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each wd on var + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ # 1:N, 2=NE, 3=E, ... + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- array(NA, dim(windirPeriod)) + + ss <- which(windirPeriod == wd) + #pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + impact.var[wd,p,,] <- var.mean.wt + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + # + # same maps as above but removing points with frequency < 3%: + # + + 
windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1)) + + ss <- which(windirFreq.wt < 0.03) + pp <- which(windirFreq.wt >= 0.03) + + windirFreq.wt[ss] <- NA + windirFreq.wt[pp] <- 1 + + var.mean.wt2 <- var.mean.wt * windirFreq.wt + rm(ss,windirFreq.wt) + gc() + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt2,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + rm(var.mean.wt, var.mean.wt2) + + } # close for on wd + +} # close for on p + +save(impact.var, file=paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData")) + + + + +# visualize and save the reconstructed monthly/seasonal anomalies with the WDs: + +load(paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) +load(paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData")) + +impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact +my.unit[[2]] <- "m/s" + +for(p in periods){ + for(y in year.start:year.end){ + #p <- 10; y <- 2016 # for debugging + + #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,] + + #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + # windirFreqInter[,10,32,66,507] + #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645 + + #PlotEquiMap(test2[,lon.swapped], 
var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,] + #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,] + #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + y2 <- y - year.start + 1 + impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,] + impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T) + + my.fileout <- paste0(workdir,"/windir/",rean.name,"_reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png") + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + for(wd in 1:n.wd){ + my.fileout <- paste0(workdir,"/windir/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png") + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(rescale(impact.weighted[wd,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + } + + } # close for on y +} # close for on p + +save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData")) + + + + + + + + + + + + + + + + + + + + + + +# Plot WTs frequency maps: + +#load(paste0(workdir,"/",rean.name,"_windirClass.RData")) +#windirClass <- [wd,1,1,year,1:365,lat,lon] + +# load lat and lon from SLP grid: +load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used + +# load daily WT classification for all years and grid points and with the same format of var365Anom: +load(paste0(workdir,"/txt/",rean.name,"/WTs.RData")) + +WTs.code <- sort(unique(as.vector(WTs[1,1,,]))) +n.wt <- length(WTs.code) +WTs.name <- WTs.type[WTs.code] +wt.num <- c(1:9,NA,NA,NA,NA,NA,NA,NA,NA,10) + +windirFreqInter <- array(NA, c(n.wt, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +# save weather type mean frequency maps: +for(p in periods){ + # select weather type data only inside period p: + windirPeriod <- WTs[,pos.period(1,p),,] + + for(wt in WTs.code){ + + # remove from windPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- array(NA, dim(windirPeriod)) #windirPeriod + + ss <- which(windirPeriod == wt) + #pp <- which(windirPeriod != wt) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss) + gc() + + # mean frequency maps: + windirDays 
<- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE)
+    windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1))
+
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_frequency_",period.name[p],"_type_",WTs.name[wt.num[wt]],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+    windirFreq[pos.lat.unused,] <- NA
+    PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F)
+
+    ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5)
+    dev.off()
+
+    # measure the interannual frequency series of that weather type (for each grid point):
+    windirFreqInter[wt.num[wt],p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1)
+    #windirPeriod.wt[year,days,,]
+
+    # plot the frequency maps for each year:
+    ## for(y in year.start:year.end){
+    ##   y2 <- y - year.start + 1
+    ##   my.fileout <- paste0(workdir,"/txt/",rean.name,"/frequency/",rean.name,"_frequency_",period.name[p],"_year_",y,"_type_",WTs.name[wt],".png")
+
+    ##   png(filename=my.fileout,width=900,height=600)
+    ##   layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+    ##   windirFreqInter[wt.num[wt],p,y2,pos.lat.unused,] <- NA
+    ##   PlotEquiMap(windirFreqInter[wt.num[wt],p,y2,,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F)
+    ##   ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5)
+    ##   dev.off()
+
+    ## } # close for on y
+
+  } # close for on wt
+
+} # close for on p
+
+save(windirFreqInter, file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_WTsFreqInter.RData"))
+
+
+
+
+# save the impact map of each WT on var (average of var only during the days belonging to a particular WT):
+
+# load var anomalies (assumption: same path used by the save() in the anomaly section above):
+load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
+
+# load lat and lon from SLP grid:
+load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# only for compatibility with older versions (it should be already loaded):
+#lat <- round(MSLP$lat,3)
+#lon <- round(MSLP$lon,3)
+
+# load daily WT classification for all years and grid points and with the same format of var365Anom:
+load(paste0(workdir,"/txt/",rean.name,"/WTs.RData"))
+
+WTs.code <- sort(unique(as.vector(WTs[1,1,,])))
+n.WTs <- length(WTs.code)
+WTs.name <- WTs.type[WTs.code]
+WTs.num <- c(1:9,NA,NA,NA,NA,NA,NA,NA,NA,10)
+
+## Load DEM (unused below and ncdf4 is not attached in this script, so keep it commented):
+#mde <- nc_open("/home/Earth/ncortesi/scripts/dem/elevation_512x256.nc")
+
+my.brks[[1]] <- seq(-10,10,1) # mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors for temperature
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue-white-red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+impact.var <- array(NA, c(n.WTs, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each WT on var
+
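+# WTs.num (like wt.num above) compresses the 26 possible type codes onto the 10 slots of
+# impact.var: codes 1-9 (the eight directional types and pure cyclonic) keep their value,
+# code 18 (pure anticyclonic) maps to slot 10, and the hybrid types stay unused (NA).
+# A small illustration (not executed):
+if(FALSE){
+  WTs.num[9]   # 9:  pure cyclonic "C" keeps its slot
+  WTs.num[18]  # 10: pure anticyclonic "A" is stored in slot 10
+  WTs.num[12]  # NA: hybrid "C.SE" is not plotted
+}
+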
+for(p in periods){
+  # select var data only inside period p:
+  varPeriod <- var365Anom[,pos.period(1,p),,]
+
+  # select the WT data only inside period p:
+  windirPeriod <- WTs[,pos.period(1,p),,]
+
+  print(paste0("Period: ", period.name[p]))
+
+  for(wt in WTs.code){
+
+    # remove from varPeriod the days not belonging to that wt (setting its value to NA):
+    windirPeriod.wt <- array(NA, dim(windirPeriod))
+
+    ss <- which(windirPeriod == wt)
+    #pp <- which(windirPeriod != wt)
+
+    windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt
+    #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+    rm(ss)
+    gc()
+
+    varPeriod.wt <- varPeriod * windirPeriod.wt
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+    impact.var[WTs.num[wt],p,,] <- var.mean.wt
+
+    # visualize the impact map of the wt on var:
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_impact_",var.name,"_",period.name[p],"_type_",WTs.name[WTs.num[wt]],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+
+    var.mean.wt[pos.lat.unused,] <- NA
+    PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+    dev.off()
+
+    ## windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1))
+
+    ## ss <- which(windirFreq.wt < 0.03)
+    ## pp <- which(windirFreq.wt >= 0.03)
+
+    ## windirFreq.wt[ss] <- NA
+    ## windirFreq.wt[pp] <- 1
+
+    ## var.mean.wt2 <- var.mean.wt * windirFreq.wt
+
+    ## rm(ss,windirFreq.wt)
+    ## gc()
+    rm(varPeriod.wt, windirPeriod.wt)
+
+  } # close for on wt
+
+  rm(varPeriod, windirPeriod)
+  gc()
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"_impact_WTs.RData"))
+
+
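+# rescale() comes from Rfunctions.R; it is assumed here to clamp the field into the colour
+# scale limits so that PlotEquiMap() does not leave values outside brks unpainted. A minimal
+# base-R sketch of that assumed behaviour (hypothetical helper, not executed):
+if(FALSE){
+  rescale.sketch <- function(x, lower, upper) pmin(pmax(x, lower), upper)
+  rescale.sketch(c(-12, 0, 15), -10, 10)   # -10 0 10
+}
+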
+# visualize and save the reconstructed monthly/seasonal anomalies with the WTs:
+
+# load the interannual WT frequencies and the WT impact maps computed above:
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_WTsFreqInter.RData"))
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"_impact_WTs.RData"))
+
+impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the reconstructed anomalies
+
+my.brks[[1]] <- seq(-10,10,1) # mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1)
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue-white-red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+# make sure the output subdir exists before writing the maps into it:
+if(!dir.exists(file.path(workdir,"txt",rean.name,"reconstructed"))) dir.create(file.path(workdir,"txt",rean.name,"reconstructed"), recursive=TRUE)
+
+for(p in periods){
+  for(y in year.start:year.end){
+    #p <- 10; y <- 2016 # for debugging
+
+    #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,]
+
+    #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    # windirFreqInter[,10,32,66,507]
+    #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645
+
+    #PlotEquiMap(test2[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    y2 <- y - year.start + 1
+    impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,]
+    impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T)
+
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/reconstructed/reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png")
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+    impact.total[p,y2,pos.lat.unused,] <- NA
+
+    PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+
+    dev.off()
+
+    for(wt in 1:n.WTs){
+      my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_type_",WTs.name[wt],".png")
+
+      png(filename=my.fileout,width=900,height=600)
+
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      impact.weighted[wt,pos.lat.unused,] <- NA
+
+      PlotEquiMap(rescale(impact.weighted[wt,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+      ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+      dev.off()
+    }
+
+  } # close for on y
+} # close for on p
+
+save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData"))
+
+
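+# The longitude swap used throughout rotates the 0..360 grid into -180..180 order before
+# plotting. A toy illustration on an assumed 8-point grid (not executed):
+if(FALSE){
+  lon.toy <- seq(0, 315, by=45)                                             # 0 45 ... 315
+  n.toy <- length(lon.toy)
+  n.lonr.toy <- n.toy - ceiling(length(lon.toy[lon.toy>0 & lon.toy<180]))   # 5 points in 180..360
+  swap.toy <- c((n.lonr.toy+1):n.toy, 1:n.lonr.toy)                         # 6 7 8 1 2 3 4 5
+  c(lon.toy[(n.lonr.toy+1):n.toy]-360, lon.toy[1:n.lonr.toy])               # -135 -90 -45 0 45 90 135 180
+}
+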
colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed + +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +mod.var <- 0 +if(var.num == 1) mod.var <- -273.5 # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + +for(p in periods){ + # Select only days of the chosen month/season: + varPeriod <- var365[,pos.period(1,p),,,drop=FALSE] + varPeriodMean <- apply(varPeriod,c(3,4),mean,na.rm=TRUE) + + png(filename=paste0(workdir,"/",var.name,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + + #PlotEquiMap(varPeriodMean+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(varPeriodMean[,lon.swapped]+mod.num, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F) + ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + + + + + + + + +} # close test diff --git a/WT_drivers_v9.R b/WT_drivers_v9.R new file mode 100644 index 0000000000000000000000000000000000000000..9c1ba0beae8985dd2f0a55e47d09e1c7cced825d --- /dev/null +++ b/WT_drivers_v9.R @@ -0,0 +1,808 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(abind) # for funcion abind() +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +# directory where to find the WT classification +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT" + +# reanalysis used for daily var data (must be the same as the mslp data): +var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +#var.rean <- list(path = '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +rean.name <- 'erai' #'ncep' #'erai' + +# any daily variable: +var.name='tas' #'sfcWind' #'prlr' +#var.name.file='Temperature' #'10-m Wind Speed' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's title + +year.start <- 1985 +year.end <- 2014 + +periods=1:17 # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values + +LOESS <- FALSE # LOESS climatology filter on/off (if off, a 5-days mobile windows is usedto measure daily anomalies) + +rm.lat <- c(-12,12) # in case of WTs, remove these lat values from the maps + +########################################################################################## +n.periods <- length(periods) +n.years <- year.end - year.start + 1 + +my.brks <- my.cols <- my.labels <- my.unit <- list() +my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD +my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1) +my.unit.freq <- "%" + +if(var.name == 
'tas') var.num <- 1 +if(var.name == 'sfcWind') var.num <- 2 +if(var.name == 'prlr') var.num <- 3 + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)","Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +wd.dir <- c("N","NE","E","SE","S","SW","W","NW") +n.wd <- length(wd.dir) + +# load one day of var data only to detect lat and lon values: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=1) +var.lat <- var$lat # var lat and lon MUST be the same of WT classification, even if the latter can have NA for certain lat values. +var.lon <- var$lon +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +l1 <- which(var.lat > rm.lat[1]) # lat values greater than -12 +l2 <- which(var.lat < rm.lat[2]) # lat values lower than 12 +pos.lat.unused <- which(!is.na(match(l1,l2))) +var.lat.unused <- var.lat[pos.lat.unused] # latitude values not visualized in the WT classification because too close to equator + +########################################################################################## + +# save var anomalies: + +# load WT metadata to get the info on year.start and year.end +#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) +#load(paste0(workdir,"/txt/ERAint/metadata.RData")) + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8) + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + +# convert var to daily anomalies: +cat("Calculating anomalies. 
Please wait...\n") + +if(LOESS == TRUE){ + var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, year.end-year.start+1) + + rm(var365ClimLoess) + gc() + +} else { # apply a 5-days mobile window: + + #var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + var365Clim <- array(NA,c(365,dim(var365)[5:6])) + + var365x3 <- abind(var365[,,,364:365,,,drop=FALSE],var365[,,,,,,drop=FALSE],var365[,,,1:2,,,drop=FALSE],along=4) # add the two days before 1st january and after 31 dec + + for(d in 1:365){ + window <- var365x3[1,1,,2+d+seq(-2,2),,,drop=FALSE] + var365Clim[d,,] <- apply(window,c(5,6),mean,na.rm=TRUE) + } + + rm(var365x3, window) + gc() + + var365Clim <- InsertDim(var365Clim, 1, n.years) + +} + + +var365Anom <- var365[1,1,,,,] - var365Clim + +rm(var365Clim) +gc() + +rm(var365) +gc() + +# save var anomalies for retreiving them when necessary: +save(var365Anom, file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) + + + + +if(LOESS == 'test'){ + +# Compute wind directions: +uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs +vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs + +windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees; 90 is necessary to shift from trigonometric system to cardinal system and +180 to shift + # from the direction wind is blowing to the direction wind is coming from +rm(uas, vas) +gc() + +# remove bisestile days from windir, to compare it with var, which has no bisestiles: +cat("Removing bisestile days. 
Please wait...\n") +windir365 <- windir[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + + +rm(windir) +gc() + +dir2 <- which(windir365 > 22.5) +dir3 <- which(windir365 > 22.5 + 45) +dir4 <- which(windir365 > 22.5 + 90) +dir5 <- which(windir365 > 22.5 + 135) +dir6 <- which(windir365 > 22.5 + 180) +dir7 <- which(windir365 > 22.5 + 225) +dir8 <- which(windir365 > 22.5 + 270) +dir1 <- which(windir365 > 22.5 + 315) + +windirClass <- array(1, dim(windir365)) # N +windirClass[dir2] <- 2 # NE +windirClass[dir3] <- 3 # E +windirClass[dir4] <- 4 # SE +windirClass[dir5] <- 5 # S +windirClass[dir6] <- 6 # SW +windirClass[dir7] <- 7 # W +windirClass[dir8] <- 8 # NW +windirClass[dir1] <- 1 # N + +rm(windir365) +rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8) +gc() + +# save it once to retreive it later: +save(windirClass, file=paste0(workdir,"/",rean.name,"_windirClass.RData")) + + + + + +# Plot frequency maps: + +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +windirFreqInter <- array(NA, c(n.wd, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +dir.create(paste0(workdir,"/windir")) + +# save wind direction mean frequency maps: +for(p in periods){ + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from windPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + # mean frequency maps: + windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) + windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1)) + + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + # mean frequency maps for North Pole: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout) + + # measure the interannual frequency series of that wind direction (for each grid point): + windirFreqInter[wd,p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1) + #windirPeriod.wt[year,days,,] + + # plot the frequency maps for each year: + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png") + png(filename=my.fileout,width=900,height=600) + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreqInter[wd,p,y2,,lon.swapped], var.lon2, var.lat, 
filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + } # close for on y + + } # close for on wd + +} # close for on p + +save(windirFreqInter, file=paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction): + +# load var anomalies and wind direction array: +load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact +my.unit[[2]] <- "m/s" + +impact.var <- array(NA, c(n.wd, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each wd on var + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ # 1:N, 2=NE, 3=E, ... + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- array(NA, dim(windirPeriod)) + + ss <- which(windirPeriod == wd) + #pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + impact.var[wd,p,,] <- var.mean.wt + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + # + # same maps as above but removing points with frequency < 3%: + # + + 
+      windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1))
+
+      ss <- which(windirFreq.wt < 0.03)
+      pp <- which(windirFreq.wt >= 0.03)
+
+      windirFreq.wt[ss] <- NA
+      windirFreq.wt[pp] <- 1
+
+      var.mean.wt2 <- var.mean.wt * windirFreq.wt
+      rm(ss,windirFreq.wt)
+      gc()
+
+      # visualize impact map of the wd on var:
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+      png(filename=my.fileout,width=900,height=600)
+
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+      PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+      ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+      dev.off()
+
+      # North Pole map:
+      #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"))
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+      PlotStereoMap(rescale(var.mean.wt2,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+      rm(var.mean.wt, var.mean.wt2)
+
+  } # close for on wd
+
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+
+
+# visualize and save the reconstructed monthly/seasonal anomalies with the WDs:
+
+load(paste0(workdir,"/",rean.name,"_windirFreqInter.RData"))
+load(paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the reconstructed anomalies
+
+my.brks[[1]] <- seq(-10,10,1) # Mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1)
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+for(p in periods){
+  for(y in year.start:year.end){
+    #p <- 10; y <- 2016 # for debugging
+
+    #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,]
+
+    #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    # windirFreqInter[,10,32,66,507]
+    #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645
+
+    #PlotEquiMap(test2[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    y2 <- y - year.start + 1
+    impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,]
+    impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T)
+
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png")
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+    dev.off()
+
+    for(wd in 1:n.wd){
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png")
+      png(filename=my.fileout,width=900,height=600)
+
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      PlotEquiMap(rescale(impact.weighted[wd,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+      ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+      dev.off()
+
+    }
+
+  } # close for on y
+} # close for on p
+
+save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData"))
+
+
+
+# Plot WTs frequency maps:
+
+#load(paste0(workdir,"/",rean.name,"_windirClass.RData"))
+#windirClass <- [wd,1,1,year,1:365,lat,lon]
+
+# load lat and lon from SLP grid:
+load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# load daily WT classification for all years and grid points and with the same format as var365Anom:
+load(paste0(workdir,"/txt/",rean.name,"/WTs.RData"))
+
+WTs.code <- sort(unique(as.vector(WTs[1,1,,])))
+n.wt <- length(WTs.code)
+WTs.name <- WTs.type[WTs.code]
+wt.num <- c(1:9,NA,NA,NA,NA,NA,NA,NA,NA,10)
+
+windirFreqInter <- array(NA, c(n.wt, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies
+
+# save weather type mean frequency maps:
+for(p in periods){
+  # select weather type data only inside period p:
+  windirPeriod <- WTs[,pos.period(1,p),,]
+
+  for(wt in WTs.code){
+
+    # remove from windirPeriod the days not belonging to that wt (setting its value to NA):
+    windirPeriod.wt <- array(NA, dim(windirPeriod)) #windirPeriod
+
+    ss <- which(windirPeriod == wt)
+    #pp <- which(windirPeriod != wt)
+
+    windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt
+    #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+    rm(ss)
+    gc()
+
+    # mean frequency maps:
+    windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE)
+    windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1))
+
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_frequency_",period.name[p],"_type_",WTs.name[wt.num[wt]],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+    windirFreq[pos.lat.unused,] <- NA
+    PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F)
+
+    ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5)
+    dev.off()
+
+    # measure the interannual frequency series of that weather type (for each grid point):
+    windirFreqInter[wt.num[wt],p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1)
+    #windirPeriod.wt[year,days,,]
+
+    # plot the frequency maps for each year:
+    ## for(y in year.start:year.end){
+    ##   y2 <- y - year.start + 1
+    ##   my.fileout <- paste0(workdir,"/txt/",rean.name,"/frequency/",rean.name,"_frequency_",period.name[p],"_year_",y,"_type_",WTs.name[wt],".png")
+
+    ##   png(filename=my.fileout,width=900,height=600)
+    ##   layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+    ##   windirFreqInter[wt.num[wt],p,y2,pos.lat.unused,] <- NA
+    ##   PlotEquiMap(windirFreqInter[wt.num[wt],p,y2,,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F)
+    ##   ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5)
+    ##   dev.off()
+
+    ## } # close for on y
+
+  } # close for on wt
+
+} # close for on p
+
+save(windirFreqInter, file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_WTsFreqInter.RData"))
+
+
+
+# save impact maps of the WTs on var (average of var only during the days belonging to a particular weather type):
+
+# load var anomalies:
+load(file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"365Anom.RData"))
+
+# load lat and lon from SLP grid:
+load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# only for compatibility with older versions (it should be already loaded):
+#lat <- round(MSLP$lat,3)
+#lon <- round(MSLP$lon,3)
+
+# load daily WT classification for all years and grid points and with the same format as var365Anom:
+load(paste0(workdir,"/txt/",rean.name,"/WTs.RData"))
+
+WTs.code <- sort(unique(as.vector(WTs[1,1,,])))
+n.WTs <- length(WTs.code)
+WTs.name <- WTs.type[WTs.code]
+WTs.num <- c(1:9,NA,NA,NA,NA,NA,NA,NA,NA,10)
+
+## Load MDE:
+#mde <- nc_open("/home/Earth/ncortesi/scripts/dem/elevation_512x256.nc")
+
+my.brks[[1]] <- seq(-10,10,1) # Mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+impact.var <- array(NA, c(n.WTs, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each WT on var
+
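+# The conditional averaging below relies on an NA/1 mask: multiplying the anomalies by a mask that is 1 on the
+# days belonging to the current WT and NA elsewhere, and then averaging with na.rm=TRUE, is the same as averaging
+# only over the days of that WT. A tiny self-contained illustration (toy numbers, not taken from the data):
+#
+# anom <- c(2, -1, 4); mask <- c(1, NA, 1)
+# mean(anom * mask, na.rm=TRUE) # returns 3, the mean of the two days kept by the mask
+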
+for(p in periods){
+  # select var data only inside period p:
+  varPeriod <- var365Anom[,pos.period(1,p),,]
+
+  # select weather type data only inside period p:
+  windirPeriod <- WTs[,pos.period(1,p),,]
+
+  print(paste0("Period: ", period.name[p]))
+
+  for(wt in WTs.code){
+
+    # remove from varPeriod the days not belonging to that wt (setting its value to NA):
+    windirPeriod.wt <- array(NA, dim(windirPeriod))
+
+    ss <- which(windirPeriod == wt)
+    #pp <- which(windirPeriod != wt)
+
+    windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt
+    #windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+    rm(ss)
+    gc()
+
+    varPeriod.wt <- varPeriod * windirPeriod.wt
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+    impact.var[WTs.num[wt],p,,] <- var.mean.wt
+
+    # visualize impact map of the wt on var:
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/",rean.name,"_impact_",var.name,"_",period.name[p],"_type_",WTs.name[WTs.num[wt]],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+
+    var.mean.wt[pos.lat.unused,] <- NA
+    PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+    dev.off()
+
+    ## windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1))
+
+    ## ss <- which(windirFreq.wt < 0.03)
+    ## pp <- which(windirFreq.wt >= 0.03)
+
+    ## windirFreq.wt[ss] <- NA
+    ## windirFreq.wt[pp] <- 1
+
+    ## var.mean.wt2 <- var.mean.wt * windirFreq.wt
+
+    ## rm(ss,windirFreq.wt)
+    ## gc()
+    rm(varPeriod.wt, windirPeriod.wt)
+
+  } # close for on wt
+
+  rm(varPeriod, windirPeriod)
+  gc()
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"_impact_WTs.RData"))
+
+
+
+# visualize and save the reconstructed monthly/seasonal anomalies with the WTs:
+
+# load the interannual WT frequencies and the WT impact maps computed above:
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_WTsFreqInter.RData"))
+load(paste0(workdir,"/txt/",rean.name,"/",rean.name,"_",var.name,"_impact_WTs.RData"))
+
+impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the reconstructed anomalies
+
+my.brks[[1]] <- seq(-10,10,1) # Mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1)
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+for(p in periods){
+  for(y in year.start:year.end){
+    #p <- 10; y <- 2016 # for debugging
+
+    #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,]
+
+    #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    # windirFreqInter[,10,32,66,507]
+    #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645
+
+    #PlotEquiMap(test2[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    y2 <- y - year.start + 1
+    impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,]
+    impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T)
+
+    my.fileout <- paste0(workdir,"/txt/",rean.name,"/reconstructed/reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png")
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+    impact.total[p,y2,pos.lat.unused,] <- NA
+
+    PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+
+    dev.off()
+
+    for(wt in 1:n.WTs){
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_type_",WTs.name[wt],".png")
+
+      png(filename=my.fileout,width=900,height=600)
+
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      impact.weighted[wt,pos.lat.unused,] <- NA
+
+      PlotEquiMap(rescale(impact.weighted[wt,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+      ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+      dev.off()
+    }
+
+  } # close for on y
+} # close for on p
+
+save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData"))
+
+# Create and save monthly/seasonal/yearly climatology maps of var:
+
+#p=17 # for the debug
+
+# Map intervals and colors:
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors for precipitation
+my.cols[[3]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[3]])-1) # white-gray-blue-purple colors for wind speed
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+mod.var <- 0
+if(var.num == 1) mod.var <- -273.15 # for temperature, subtract the Kelvin offset to plot the climatology in degrees Celsius
+
+for(p in periods){
+  # Select only days of the chosen month/season:
+  varPeriod <- var365[,pos.period(1,p),,,drop=FALSE]
+  varPeriodMean <- apply(varPeriod,c(3,4),mean,na.rm=TRUE)
+
+  png(filename=paste0(workdir,"/",var.name,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+  #PlotEquiMap(varPeriodMean+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+  PlotEquiMap(varPeriodMean[,lon.swapped]+mod.var, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F)
+  ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
+
+} # close test
diff --git a/WT_v8.R b/WT_v8.R
new file mode 100644
index 0000000000000000000000000000000000000000..65b31e18ef133d5cb0ea64c73241f120e74465d9
--- /dev/null
+++ b/WT_v8.R
@@ -0,0 +1,477 @@
+#########################################################################################
+#                         Lamb's WTs with ERA-Interim                                   #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript WT_v8.R 1984
+#
+# where 1984 is the year whose WTs you want to classify;
+# in this way you can run up to 8 jobs at the same time, each one producing its output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript WT_v8.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, each one after finishing the previous one.
+#
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript WT_v8.R $y & done
+#
+# but it would need 35 processors! In practice, it is possible to run only 4-8 years at a time.
+#
+
+library(s2dverification) # for the function Load()
+# library(ff)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path
+
+# Available reanalyses:
+ERAint <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/recon/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files
+#subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean=ERAint #'ERAintDailyHighRes' #'ERAintDailyLowRes' # choose one of the daily reanalyses above for loading MSLP data
+rean.name <- 'erai'
+
+year.start=1981 # starting year of the MSLP daily data (from the 1st of January)
+year.end=2016 # ending year of the MSLP daily data (up to the 31st of December)
+
+res=4.9 #5.25 # spacing between the Lamb grid points in the meridional direction (in the zonal direction, it is exactly double this value)
+              # it should be set to the multiple of 'psl.res' closest to 5 degrees, i.e.: res = psl.res * round((5/psl.res))
+
+low.res.size=7 # odd number of nearby MSLP grid points used to compute the psl at each of the 16 points (both for lon and lat: the actual n. of points used is its square)
+
+#partial.end=FALSE # put TRUE if the last year ('year.end') does not have the full year of data but stops before December 31st; in this case, also specify the variable below
+#n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE). E.g.: data for 2015 doesn't include December, so it has 365-31=334 days
+
+merge=FALSE # put TRUE if you want to concatenate all the WT classifications for different years and the same grid point at the end of the analysis, FALSE otherwise
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+n.years <- year.end - year.start + 1
+
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+#MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+MSLP <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+lat <- round(MSLP$lat,3)
+lon <- round(MSLP$lon,3)
+
+n.lat <- length(lat) # number of latitude values
+n.lon <- length(lon) # number of longitude values
+
+lon.pos <- min(MSLP$lon) >= 0 # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not
+
+# for each grid point of the reanalysis, compute the Lamb WT classification with the Lamb grid centered on that point.
+# we exclude only central points > +77 degrees lat and < -77 deg. because the Lamb grid needs 12.5 deg. north/south of the central point.
+n.lat.unused.poles <- 17 #20 # number of unused latitude values near each of the two poles (the last ~12.5 degrees N/S must be excluded, so it depends on the spatial res. of the reanalysis)
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)] # latitude values used as central points
+psl.res <- diff(lat.used)[1] # psl grid resolution
+lat.used <- round(lat.used,3) # round psl values to the third decimal to save them in a file with a short file name (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3) # longitude values used as central points rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# save metadata of the WT classification to retrieve it in subsequent analyses (do not save it if you are only adding a new year to an existing classification!!!):
+save.image(file=paste0(workdir,"/txt/",rean.name,"/metadata.RData"))
+
+# load all MSLP data one year at a time:
+for(y in year.start:year.end){
+  print(paste0("Year: ",y))
+
+  #for(month in 1:12){
+  #  print(paste0("Month: ",month))
+
+  #  month_2_digits <- ifelse(month<10, paste0("0",month), month)
+  n.days <- n.days.in.a.year(y) #n.days.in.a.month(month,y)
+
+  # load data of year y:
+  MSLP.year <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(y,'01','01'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', nprocs=1)
+  gc()
+
+  # loop over the lat/lon of the central point of the Lamb classification:
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+    ## first, compute the latitude coefficients:
+    SF1 <- 1/cos(latc*pi/180)
+    ZS1 <- 1/(2*cos(latc*pi/180)^2)
+    ZW1 <- sin(latc*pi/180)/sin((latc-res)*pi/180)
+    ZW2 <- sin(latc*pi/180)/sin((latc+res)*pi/180)
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+
+      n.point <- (pos.latc-1)*n.lon.used + pos.lonc # number of points already computed
+      cat(paste0("Grid point saved: ", n.point ,"/", n.grid.points, " "), "\r")
+
+      # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point:
+      lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid
+      lat[1] <- lat[2] <- latc + 2*res
+      lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res
+      lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc
+      lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res
+      lat[15] <- lat[16] <- latc - 2*res
+
+      lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res
+      lon[3] <- lon[7] <- lon[11] <- lonc - 3*res
+      lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res
+      lon[6] <- lon[10] <- lon[14] <- lonc + 3*res
+
+      if(lat[1] > 90) stop("*** Lamb grid is employing points beyond 90 degrees ***")
+
+      # longitude correction for reanalyses with positive-only longitudes (to stay always positive):
+      if(lon.pos){
+        if(lonc - res < 0){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc - 3*res < 0 && lonc - res >= 0){
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 360){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+        if(lonc + 3*res >= 360 && lonc + res < 360){
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
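+      # A worked example of the wrap-around above (assumed values): on a [0,360) grid with res = 4.9, a central
+      # point at lonc = 2.8 gives lonc - res = -2.1, which is re-mapped to -2.1 + 360 = 357.9, so the western Lamb
+      # points are taken from across the Greenwich meridian instead of falling off the grid.
+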
+      # longitude correction for reanalyses with [-180, +180] longitudes (to stay always in the same interval):
+      # MUST STILL BE UPDATED FOR CASE lonc + 15 >= 360 !!!
+      if(!lon.pos){
+        if(lonc - res < -180){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 180){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
+      # lat and lon positions of the 16 Lamb grid points inside the MSLP reanalysis:
+      pos.lat <- pos.lon <- c()
+      for(p in 1:16){
+        pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon)
+        pos.lat[p] <- pos[1]
+        pos.lon[p] <- pos[2]
+      }
+
+      #WTs <- data.frame()
+
+      # mean psl at each of the 16 grid points:
+      psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid
+      #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]]
+
+      for(p in 1:16) {
+        pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius)
+
+        if(head(pos.lon.low.res,1) <= 0){
+          ss <- which(pos.lon.low.res <= 0)
+          pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+        }
+        if(tail(pos.lon.low.res,1) > n.lon.used){
+          ss <- which(pos.lon.low.res > n.lon.used)
+          pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+        }
+
+        pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never has to be corrected because we excluded the poles
+
+        MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]
+        psl[,p] <- apply(MSLP.low.res, 1, mean)
+      }
+
+      # compute the geostrophic indices (don't round them to the 3rd decimal as done by Trigo and Martin-Vide, not to introduce an error that changes 3-4% of the WTs!!!):
+      SF<-WF<-F<-D<-D2<-c()
+
+      WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5])
+      SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]))
+      F <- (SF*SF + WF*WF)^0.5
+
+      if(latc < 0){
+        WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense!
+        SF <- -SF
+      }
+
+      # D is the flow direction; since the angle returned by atan is in radians, it must be converted to degrees by multiplying it by (180/pi).
+      # D is the angle between the wind direction and its projection in the north-south direction (SF).
+      D <- (180/pi) * atan(WF/SF)
+
+      # transform the initial angle D into an always-positive angle D2, measured clockwise from North:
+      Q1 <- which(WF>0 & SF>0)
+      Q2 <- which(WF>0 & SF<=0)
+      Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to 3 and which to 4.
+      Q4 <- which(WF<0 & SF>0)  # the first quadrant is at the top right, the second at the bottom right, the third at the bottom left, the fourth at the top left.
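+
+      # Equivalent shortcut (a sketch for clarity, not used by this script): atan2() already resolves the
+      # quadrant, so the same 0-360 clockwise-from-North angle could be obtained directly as
+      # D2.check <- (180/pi)*atan2(WF, SF) + 180
+      # which reproduces the Q1-Q4 corrections applied below.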
+
+      # the wind direction usually differs from the angle D the wind is blowing to, so a correction has to be made:
+      D2 <- D
+      D2[Q1] <- D[Q1] + 180
+      D2[Q2] <- D[Q2] + 360
+      D2[Q4] <- D[Q4] + 180
+
+      # identify the directional WTs:
+      NE <- which(D2>=22.5 & D2<67.5)
+      E <- which(D2>=67.5 & D2<112.5)
+      SE <- which(D2>=112.5 & D2<157.5)
+      S <- which(D2>=157.5 & D2<202.5)
+      SW <- which(D2>=202.5 & D2<247.5)
+      W <- which(D2>=247.5 & D2<292.5)
+      NW <- which(D2>=292.5 & D2<337.5)
+      N <- which(D2>=337.5 | D2<22.5)
+
+      # add a column on the right with the directional WT associated with each angle:
+      D2.dir<-rep(NA,length(D2))
+      D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N"
+
+      # compute the vorticity components:
+      ZS<-ZW<-Z<-c()
+
+      ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11]))
+      ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2]))
+      Z <- ZS + ZW
+
+      # Pure and hybrid WTs
+      cyc.pure <- which(Z>2*F)
+      anticyc.pure <- which(Z<(-2*F))
+      hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F)
+      hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F)
+      indeter <- which(F<6 & abs(Z)<6) # indeterminate weather type (U); the choice of 6 depends on the grid size and should be changed if the grid res. is higher!
+
+      # create a data frame with all the info about the WT classification:
+
+      #years.period <- months.period <- days.period <- c()
+      #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+      #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+      #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+      years.period <- rep(y, n.days)
+      months.period <- seq.months.in.a.year(y)[1:n.days]
+      days.period <- seq.days.in.a.year(y)[1:n.days]
+      WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+      WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir)
+
+      # column identifying which days are of hybrid type and which of directional type:
+      WT$Hyb <- rep("Directional",length(D2))
+      WT$Hyb[cyc.pure] <- "Pure_C"
+      WT$Hyb[anticyc.pure] <- "Pure_A"
+      WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+      WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+      WT$Hyb[which(is.na(D2))] <- NA # remove the "Directional" label from days with no data
+
+      # column with the list of the 26 official WTs:
+      WT$WT <- WT$WT.dir
+      WT$WT[cyc.pure] <- "C"
+      WT$WT[anticyc.pure] <- "A"
+      WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+      WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+      #WT$WT[indeter] <- "U" # uncomment this line to include the Unclassified (U) WT
+
+      WT$WT.num <- match(WT$WT,WTs.type) # add a column converting the 26 weather types to numbers from 1 to 26
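+
+      # For instance (the value follows from the WTs.type vector defined above): a hybrid cyclonic day with a
+      # south-westerly flow is labelled "C.SW", and match("C.SW", WTs.type) returns 14, its position in WTs.type.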
+ WT$WT10 <- WT$WT.dir
+ WT$WT10[cyc.pure] <- "C"
+ WT$WT10[anticyc.pure] <- "A"
+ WT$WT10.num <- match(WT$WT10,WTs.type) # convert the 10 weather types to numbers from 1 to 9, plus the A type, which converts to number 18
+
+ # add the other geostrophic indexes:
+ WT$SF=round(SF,5)
+ WT$WF=round(WF,5)
+ WT$F=round(F,5)
+ WT$ZS=round(ZS,5)
+ WT$ZW=round(ZW,5)
+ WT$Z=round(Z,5)
+
+ #WTs<-rbind(WTs,WT)
+ #assign(paste0("WT","_year_",y,"_lat_",latc,"_lon_",lonc), WT)
+
+ # when saving as .txt instead of .RData, the total computational time decreases from 30h to 12h !!!
+ if(!dir.exists(paths=paste0(workdir,"/txt/",rean.name,"/",y))) dir.create(paste0(workdir,"/txt/",rean.name,"/",y)) # create the dir with the year data if it doesn't exist
+ write.table(WT,paste0(workdir,"/txt/",rean.name,"/",y,"/WT_year_",y,"_lat_",latc,"_lon_",lonc,".txt"))
+
+ rm(WT,SF,WF,F,D,ZS,ZW,Z)
+
+ gc()
+
+ } # close for on lonc
+ } # close for on latc
+ #cat("\n")
+
+ rm(MSLP.year)
+
+ # save the whole classification of a single year and month in a single .RData file:
+ #output <- paste0(workdir,"/WTs_",rean.name,"_year_",y,".RData")
+ #save.image(file=output)
+
+ gc()
+
+ #} # close for on month
+ #gc()
+
+} # close for on y
+
+
+postproc <- FALSE
+if(postproc == TRUE){
+
+# merge all the txt data with the same lat and lon into 1 file, to have all the years together, and delete the yearly files:
+# beware that if you do it in an interactive session, the time available (12h) is usually not enough to finish this operation!
+# these txt files are convenient if you want to give them to other researchers who study a specific area
+
+if(merge){
+ if(!dir.exists(file.path(workdir,paste0("txt/ERAint/all_years")))) dir.create(file.path(workdir,paste0("txt/ERAint/all_years")))
+
+ #lat.used <- lat.used[205:221]
+
+ for(latc in lat.used){
+ #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+ for(lonc in lon.used){
+ pos.latc <- which(lat.used==latc)
+ pos.lonc <- which(lon.used==lonc)
+
+ #cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used + pos.lonc ,"/", n.grid.points, " "), "\r")
+ print(paste0("lat: ",latc," lon: ", lonc))
+
+ for(y in year.start:year.end){
+ #print(paste0("Year: ",y))
+
+ WTs <- read.table(file=paste0(workdir,"/txt/",rean.name,"/",y,"/WT_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),stringsAsFactors=FALSE, header=TRUE) # load the WTs
+ if(y == year.start){
+ WTs_full_period <- WTs
+ } else {
+ WTs_full_period <- rbind(WTs_full_period, WTs)
+ }
+
+ }
+
+ write.table(WTs_full_period, file=paste0(workdir,"/txt/",rean.name,"/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+
+ rm(WTs_full_period)
+
+ }
+ }
+
+ # delete the old yearly files in the _txt directories, and the directories themselves:
+ for(y in year.start:year.end){
+ system(paste0("rm -fr ",workdir,"/",y,"_txt/"))
+ }
+
+} # close if on merge
+
+
+
+
+
+
+# merge all the txt files together into a single array of format [years, days, lat, lon]; for example, in the case of ERA-Interim: [30, 365, 256, 512]. It takes ~4 hours.
+# After this step, the worldwide WT classification is ready to be loaded in the script 'WT_drivers.R'
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+#WTs <- array(NA, c(n.years*365, n.lat, n.lon))
+WTs <- array(NA, c(n.years, 365, n.lat, n.lon))
+
+# this loop lasts ~4 hours with ERA-Interim:
+for(latc in lat){
+ #latc <- lat[20]; lonc <- lon[1]; # for the debug
+
+ for(lonc in lon){
+ pos.latc <- which(lat == latc)
+ pos.lonc <- which(lon == lonc)
+
+ # load the WT classification for that point:
+ WT.file <- paste0(workdir,"/txt/ERAint/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt")
+
+ if(file.exists(WT.file)){
+ print(paste0("pos.lat: ",pos.latc," pos.lon: ",pos.lonc))
+
+ WT <- read.table(WT.file,header=TRUE, stringsAsFactors=FALSE) # WT classification at this grid point
+
+ # remove the leap days (29 February) from the WT classification:
+ WT$MonthDay <- paste0(WT$Month,WT$Day) # create a new column
+ bis <- which(WT$MonthDay == "229")
+ WT365 <- if(length(bis) > 0) WT[-bis,] else WT # guard: WT[-integer(0),] would drop all rows
+ WT365$MonthDay <- NULL
+
+ # insert the WT classification with 10 WTs:
+ #WTs[, pos.latc, pos.lonc] <- WT$WT10.name
+ for(y in year.start:year.end) WTs[y-year.start+1,, pos.latc, pos.lonc] <- WT365$WT10.num[365*(y-year.start)+1:365]
+ rm(WT365, WT, bis)
+
+ }
+ }
+
+}
+
+save(WTs,file=paste0(workdir,"/txt/",rean.name,"/WTs.RData"))
+
+
+
+
+
+
+
+# only add ONE year to the existing classification:
+
+year.start <- 1985 # first year of the old classification to update
+year.end <- 2014 # last year of the old classification to update
+
+y.new <- 2016 # year to add to the classification
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+for(latc in lat.used){
+ #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+ for(lonc in lon.used){
+ pos.latc <- which(lat.used==latc)
+ pos.lonc <- which(lon.used==lonc)
+
+ #cat(paste0("Adding classification at grid point: ",(pos.latc-1)*n.lon.used + pos.lonc ,"/", n.grid.points, " "), "\r")
+ print(paste0("lat: ",latc," lon: ", lonc))
+
+ WTs <- read.table(file=paste0(workdir,"/txt/",rean.name,"/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),stringsAsFactors=FALSE, header=TRUE) # load the old WTs
+
+ WTs_new <- read.table(file=paste0(workdir,"/txt/",rean.name,"/",y.new,"/WT_year_",y.new,"_lat_",latc,"_lon_",lonc,".txt"),stringsAsFactors=FALSE, header=TRUE) # load the WTs of the new year
+
+ WTs_all <- rbind(WTs, WTs_new)
+
+ write.table(WTs_all, file=paste0(workdir,"/txt/",rean.name,"/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+
+ rm(WTs_all)
+
+ }
+}
+
+# when you have finished inserting all the new years, you have to re-run the previous for block again, to re-create the updated WTs.RData!!!
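+
+# An optional disabled sanity check (a sketch, assuming the merged files above exist):
+# after adding y.new, each merged file should hold one row per day of year.start..year.end plus y.new:
+if(FALSE){
+ f <- paste0(workdir,"/txt/",rean.name,"/all_years/WTs_",year.start,"-",year.end,"_lat_",lat.used[1],"_lon_",lon.used[1],".txt")
+ n.expected <- sum(sapply(c(year.start:year.end, y.new), n.days.in.a.year))
+ stopifnot(nrow(read.table(f, header=TRUE)) == n.expected)
+}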
+
+
+
+
+
+} # close if on postproc
diff --git a/WT_v8.R~ b/WT_v8.R~
new file mode 100644
index 0000000000000000000000000000000000000000..3996ad4d359e1e46c626577c4e04503cbe5b6764
--- /dev/null
+++ b/WT_v8.R~
@@ -0,0 +1,477 @@
+#########################################################################################
+# Lamb's WTs with ERA-Interim #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript WT_v5.R 1984
+#
+# where 1984 is the year whose WTs you want to classify;
+# this way you can run up to 8 jobs at the same time, each one producing its own output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript WT_v5.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, one after the other.
+#
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript WT_v5.R $y & done
+#
+# but it would need 35 processors! In practice, it is possible to run only 4-8 years at a time.
+#
+
+library(s2dverification) # for the function Load()
+# library(ff)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path
+
+# Available reanalyses:
+ERAint <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/recon/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files
+#subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean=ERAint #'ERAintDailyHighRes' #'ERAintDailyLowRes' # choose one of the daily reanalyses above for loading MSLP data
+rean.name <- 'erai'
+
+year.start=1981 # starting year of the MSLP daily data (from the 1st of January)
+year.end=2016 # ending year of the MSLP daily data (up to the 31st of December)
+
+res=4.9 #5.25 # spacing between the Lamb grid points in the meridional direction (in the zonal direction, it is exactly double this value);
+ # it should be set equal to the multiple of 'psl.res' closest to 5 degrees, i.e.: res = psl.res * round(5/psl.res)
+
+low.res.size=7 # odd number of nearby MSLP grid points used to average the psl at each of the 16 Lamb points (both in lon and lat: the actual number of points used is its square)
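+
+# A disabled sketch of the relation between 'low.res.size' and the search radius computed
+# further below as radius = (low.res.size-1)/2 (illustrative values only):
+if(FALSE){
+ sizes <- c(3, 5, 7, 9)
+ print(data.frame(low.res.size = sizes, radius = (sizes-1)/2, n.cells.averaged = sizes^2))
+}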
+
+#partial.end=FALSE # set TRUE if the last year ('year.end') does not contain all the yearly data but stops before December 31st; in this case, the variable below must also be specified
+#n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE). E.g.: data for 2015 doesn't include December, so it has 365-31=334 days
+
+merge=FALSE # set TRUE if you want to concatenate all the WT classifications of different years and the same grid point at the end of the analysis, FALSE otherwise
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+n.years <- year.end - year.start + 1
+
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+#MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+MSLP <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+lat <- round(MSLP$lat,3)
+lon <- round(MSLP$lon,3)
+
+n.lat <- length(lat) # number of latitude values
+n.lon <- length(lon) # number of longitude values
+
+lon.pos <- all(MSLP$lon >= 0) # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+# for each grid point of the reanalysis, compute the Lamb WT classification with the Lamb grid centered on that point.
+# we exclude only central points above +77 degrees lat and below -77 deg., because the Lamb grid needs 12.5 deg. north/south of the central point.
+n.lat.unused.poles <- 17 #20 # number of unused latitude values near each of the two poles (the last ~10 degrees N/S must be excluded, so it depends on the spatial resolution of the reanalysis)
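+
+# A disabled check of the latitude span excluded at each pole, assuming a regular grid;
+# it should cover the margin the Lamb grid needs around the central point:
+if(FALSE){
+ grid.step <- abs(diff(MSLP$lat))[1]
+ print(n.lat.unused.poles * grid.step) # degrees of latitude excluded at each pole
+}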
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)] # latitude values used as central points
+psl.res <- diff(lat.used)[1] # psl grid resolution
+lat.used <- round(lat.used,3) # round the latitude values to the third decimal, to save them in a file with a short file name (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3) # longitude values used as central points, rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# save metadata on the WT classification, to retrieve it in subsequent analyses (do not save it if you are only adding a new year to an existing classification!!!):
+save.image(file=paste0(workdir,"/txt/",rean.name,"/metadata.RData"))
+
+# load all MSLP data, one year at a time:
+for(y in year.start:year.end){
+ print(paste0("Year: ",y))
+
+ #for(month in 1:12){
+ # print(paste0("Month: ",month))
+
+ # month_2_digits <- ifelse(month<10, paste0("0",month), month)
+ n.days <- n.days.in.a.year(y) #n.days.in.a.month(month,y)
+
+ # load the data of year y:
+ MSLP.year <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(y,'01','01'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', nprocs=1)
+ gc()
+
+ # loop over the lat/lon of the central point of the Lamb classification:
+ for(latc in lat.used){
+ #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+ ## first, compute the latitude-dependent coefficients:
+ SF1 <- 1/cos(latc*pi/180)
+ ZS1 <- 1/(2*cos(latc*pi/180)^2)
+ ZW1 <- sin(latc*pi/180)/sin((latc-res)*pi/180)
+ ZW2 <- sin(latc*pi/180)/sin((latc+res)*pi/180)
+
+ for(lonc in lon.used){
+ pos.latc <- which(lat.used==latc)
+ pos.lonc <- which(lon.used==lonc)
+
+ n.point <- (pos.latc-1)*n.lon.used + pos.lonc # number of points already computed
+ cat(paste0("Grid point saved: ", n.point ,"/", n.grid.points, " "), "\r")
+
+ # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point:
+ lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid
+ lat[1] <- lat[2] <- latc + 2*res
+ lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res
+ lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc
+ lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res
+ lat[15] <- lat[16] <- latc - 2*res
+
+ lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res
+ lon[3] <- lon[7] <- lon[11] <- lonc - 3*res
+ lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res
+ lon[6] <- lon[10] <- lon[14] <- lonc + 3*res
+
+ if(lat[1] > 90) stop("*** Lamb grid is employing points beyond 90 degrees ***")
+
+ # longitude correction for reanalyses with positive-only longitudes (to stay always positive):
+ if(lon.pos){
+ if(lonc - res < 0){
+ lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+ lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+ }
+ if(lonc - 3*res < 0 && lonc - res >= 0){
+ lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+ }
+ if(lonc + res >= 360){
+ lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+ lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+ }
+ if(lonc + 3*res >= 360 && lonc + res < 360){
+ lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+ }
+ }
+
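+ # A compact alternative to the correction blocks above and below (a disabled sketch,
+ # not used here): any longitude can be wrapped into [0,360) or [-180,180) with
+ # modular arithmetic:
+ if(FALSE){
+ wrap360 <- function(x) x %% 360
+ wrap180 <- function(x) ((x + 180) %% 360) - 180
+ print(wrap360(-9.8)) # 350.2
+ print(wrap180(185)) # -175
+ }
+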
+ # longitude correction for reanalyses with [-180, +180] longitudes (to stay always in the same interval):
+ # MUST STILL BE UPDATED FOR THE CASE lonc + 15 >= 360 !!!
+ if(!lon.pos){
+ if(lonc - res < -180){
+ lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+ lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+ }
+ if(lonc + res >= 180){
+ lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+ lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+ }
+ }
+
+ # lat and lon positions of the 16 Lamb grid points inside the MSLP reanalysis:
+ pos.lat <- pos.lon <- c()
+ for(p in 1:16){
+ pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon)
+ pos.lat[p] <- pos[1]
+ pos.lon[p] <- pos[2]
+ }
+
+ #WTs <- data.frame()
+
+ # mean psl at each of the 16 grid points:
+ psl <- array(NA,c(n.days,16)) # for each day of the year, the MSLP at the 16 points of Lamb's grid
+ #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]]
+
+ for(p in 1:16) {
+ pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius)
+
+ if(head(pos.lon.low.res,1) <= 0){
+ ss <- which(pos.lon.low.res <= 0)
+ pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+ }
+ if(tail(pos.lon.low.res,1) > n.lon.used){
+ ss <- which(pos.lon.low.res > n.lon.used)
+ pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+ }
+
+ pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never needs to be corrected, because we excluded the poles
+
+ MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]
+ psl[,p] <- apply(MSLP.low.res, 1, mean)
+ }
+
+ # compute the geostrophic indexes (do not round them to the 3rd decimal as done by Trigo and Martin-Vide, so as not to introduce an error that changes 3-4% of the WTs!!!):
+ SF<-WF<-F<-D<-D2<-c()
+
+ WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5])
+ SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]))
+ F <- (SF*SF + WF*WF)^0.5
+
+ if(latc < 0){
+ WF <- -WF # because in the southern hemisphere cyclones and anticyclones spin in the opposite sense!
+ SF <- -SF
+ }
+
+ # D is the flow direction; since the angle returned by atan is expressed in radians, it must be converted to degrees by multiplying it by (180/pi).
+ # D is the angle between the wind direction and its projection onto the north-south direction (SF).
+ D <- (180/pi) * atan(WF/SF)
+
+ # transform the initial angle D into an always-positive angle D2, measured clockwise starting from North:
+ Q1 <- which(WF>0 & SF>0)
+ Q2 <- which(WF>0 & SF<=0)
+ Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to the 3rd and which to the 4th.
+ Q4 <- which(WF<0 & SF>0) # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left.
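+
+ # A disabled sketch of the circular-index correction used for pos.lon.low.res earlier
+ # in this loop: indices falling off the western edge of the grid wrap around to the
+ # eastern edge (the 512 below is an illustrative n.lon.used):
+ if(FALSE){
+ idx <- -1:3 # e.g. a 5-point window centered on pos.lon = 1
+ ss <- which(idx <= 0)
+ print(c(idx[ss] + 512, idx[-ss])) # 511 512 1 2 3
+ }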
+
+ # the wind direction is usually different from the angle D the wind is blowing to, so a correction has to be made:
+ D2 <- D
+ D2[Q1] <- D[Q1] + 180
+ D2[Q2] <- D[Q2] + 360
+ D2[Q4] <- D[Q4] + 180
+
+ # identify the directional WTs:
+ NE <- which(D2>=22.5 & D2<67.5)
+ E <- which(D2>=67.5 & D2<112.5)
+ SE <- which(D2>=112.5 & D2<157.5)
+ S <- which(D2>=157.5 & D2<202.5)
+ SW <- which(D2>=202.5 & D2<247.5)
+ W <- which(D2>=247.5 & D2<292.5)
+ NW <- which(D2>=292.5 & D2<337.5)
+ N <- which(D2>=337.5 | D2<22.5)
+
+ # add a column on the right with the directional WT associated with each angle:
+ D2.dir<-rep(NA,length(D2))
+ D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N"
+
+ # compute the vorticity components:
+ ZS<-ZW<-Z<-c()
+
+ ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11]))
+ ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2]))
+ Z <- ZS + ZW
+
+ # Pure and hybrid WTs
+ cyc.pure <- which(Z>2*F)
+ anticyc.pure <- which(Z<(-2*F))
+ hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F)
+ hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F)
+ indeter <- which(F<6 & abs(Z)<6) # indeterminate weather type (U); the choice of 6 depends on the grid size and should be changed if the grid resolution is higher!
+
+ # create a data frame with all the info about the WT classification:
+
+ #years.period <- months.period <- days.period <- c()
+ #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+ #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+ #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+ years.period <- rep(y, n.days)
+ months.period <- seq.months.in.a.year(y)[1:n.days]
+ days.period <- seq.days.in.a.year(y)[1:n.days]
+ WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+ WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir)
+
+ # column identifying which days are of hybrid type and which of directional type:
+ WT$Hyb <- rep("Directional",length(D2))
+ WT$Hyb[cyc.pure] <- "Pure_C"
+ WT$Hyb[anticyc.pure] <- "Pure_A"
+ WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+ WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+ WT$Hyb[which(is.na(D2))] <- NA # remove the "Directional" label from days with no data
+
+ # column with the list of the 26 official WTs:
+ WT$WT <- WT$WT.dir
+ WT$WT[cyc.pure] <- "C"
+ WT$WT[anticyc.pure] <- "A"
+ WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+ WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+ #WT$WT[indeter] <- "U" # keep this line commented out so as not to include the Unclassified U WT
+
+ WT$WT.num <- match(WT$WT,WTs.type) # add a column converting the 26 weather types to numbers from 1 to 26
+
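+ # A disabled sketch of the |Z| vs F decision rule applied above: pure C/A when |Z| > 2F,
+ # hybrid when F < |Z| < 2F, directional otherwise (illustrative values only):
+ if(FALSE){
+ Z.demo <- c(10, -10, 6, -6, 1); F.demo <- rep(4, 5)
+ print(ifelse(abs(Z.demo) > 2*F.demo, ifelse(Z.demo > 0, "Pure_C", "Pure_A"),
+ ifelse(abs(Z.demo) > F.demo, ifelse(Z.demo > 0, "Hybrid_C", "Hybrid_A"), "Directional")))
+ }
+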
+ # column with the list of the 10 WTs obtained by merging the eight pure directional WTs with C and A;
+ # each hybrid WT is merged into its corresponding directional type (e.g., C.SW is merged into SW, etc.):
+ WT$WT10 <- WT$WT.dir
+ WT$WT10[cyc.pure] <- "C"
+ WT$WT10[anticyc.pure] <- "A"
+ WT$WT10.num <- match(WT$WT10,WTs.type) # convert the 10 weather types to numbers from 1 to 9, plus the A type, which converts to number 18
+
+ # add the other geostrophic indexes:
+ WT$SF=round(SF,5)
+ WT$WF=round(WF,5)
+ WT$F=round(F,5)
+ WT$ZS=round(ZS,5)
+ WT$ZW=round(ZW,5)
+ WT$Z=round(Z,5)
+
+ #WTs<-rbind(WTs,WT)
+ #assign(paste0("WT","_year_",y,"_lat_",latc,"_lon_",lonc), WT)
+
+ # when saving as .txt instead of .RData, the total computational time decreases from 30h to 12h !!!
+ if(!dir.exists(paths=paste0(workdir,"/txt/",rean.name,"/",y))) dir.create(paste0(workdir,"/txt/",rean.name,"/",y)) # create the dir with the year data if it doesn't exist
+ write.table(WT,paste0(workdir,"/txt/",rean.name,"/",y,"/WT_year_",y,"_lat_",latc,"_lon_",lonc,".txt"))
+
+ rm(WT,SF,WF,F,D,ZS,ZW,Z)
+
+ gc()
+
+ } # close for on lonc
+ } # close for on latc
+ #cat("\n")
+
+ rm(MSLP.year)
+
+ # save the whole classification of a single year and month in a single .RData file:
+ #output <- paste0(workdir,"/WTs_",rean.name,"_year_",y,".RData")
+ #save.image(file=output)
+
+ gc()
+
+ #} # close for on month
+ #gc()
+
+} # close for on y
+
+
+postproc <- FALSE
+if(postproc == TRUE){
+
+# merge all the txt data with the same lat and lon into 1 file, to have all the years together, and delete the yearly files:
+# beware that if you do it in an interactive session, the time available (12h) is usually not enough to finish this operation!
+# these txt files are convenient if you want to give them to other researchers who study a specific area
+
+if(merge){
+ if(!dir.exists(file.path(workdir,paste0("txt/ERAint/all_years")))) dir.create(file.path(workdir,paste0("txt/ERAint/all_years")))
+
+ #lat.used <- lat.used[205:221]
+
+ for(latc in lat.used){
+ #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+ for(lonc in lon.used){
+ pos.latc <- which(lat.used==latc)
+ pos.lonc <- which(lon.used==lonc)
+
+ #cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used + pos.lonc ,"/", n.grid.points, " "), "\r")
+ print(paste0("lat: ",latc," lon: ", lonc))
+
+ for(y in year.start:year.end){
+ #print(paste0("Year: ",y))
+
+ WTs <- read.table(file=paste0(workdir,"/txt/",rean.name,"/",y,"/WT_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),stringsAsFactors=FALSE, header=TRUE) # load the WTs
+ if(y == year.start){
+ WTs_full_period <- WTs
+ } else {
+ WTs_full_period <- rbind(WTs_full_period, WTs)
+ }
+
+ }
+
+ write.table(WTs_full_period, file=paste0(workdir,"/txt/",rean.name,"/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+
+ rm(WTs_full_period)
+
+ }
+ }
+
+ # delete the old yearly files in the _txt directories, and the directories themselves:
+ for(y in year.start:year.end){
+ system(paste0("rm -fr ",workdir,"/",y,"_txt/"))
+ }
+
+} # close if on merge
+
+
+
+
+
+
+# merge all the txt files together into a single array of format [years, days, lat, lon]; for example, in the case of ERA-Interim: [30, 365, 256, 512]. It takes ~4 hours.
+# After this step, the worldwide WT classification is ready to be loaded in the script 'WT_drivers.R'
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+#WTs <- array(NA, c(n.years*365, n.lat, n.lon))
+WTs <- array(NA, c(n.years, 365, n.lat, n.lon))
+
+# this loop lasts ~4 hours with ERA-Interim:
+for(latc in lat){
+ #latc <- lat[20]; lonc <- lon[1]; # for the debug
+
+ for(lonc in lon){
+ pos.latc <- which(lat == latc)
+ pos.lonc <- which(lon == lonc)
+
+ # load the WT classification for that point:
+ WT.file <- paste0(workdir,"/txt/ERAint/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt")
+
+ if(file.exists(WT.file)){
+ print(paste0("pos.lat: ",pos.latc," pos.lon: ",pos.lonc))
+
+ WT <- read.table(WT.file,header=TRUE, stringsAsFactors=FALSE) # WT classification at this grid point
+
+ # remove the leap days (29 February) from the WT classification:
+ WT$MonthDay <- paste0(WT$Month,WT$Day) # create a new column
+ bis <- which(WT$MonthDay == "229")
+ WT365 <- if(length(bis) > 0) WT[-bis,] else WT # guard: WT[-integer(0),] would drop all rows
+ WT365$MonthDay <- NULL
+
+ # insert the WT classification with 10 WTs:
+ #WTs[, pos.latc, pos.lonc] <- WT$WT10.name
+ for(y in year.start:year.end) WTs[y-year.start+1,, pos.latc, pos.lonc] <- WT365$WT10.num[365*(y-year.start)+1:365]
+ rm(WT365, WT, bis)
+
+ }
+ }
+
+}
+
+save(WTs,file=paste0(workdir,"/txt/",rean.name,"/WTs.RData"))
+
+
+
+
+
+
+
+# only add ONE year to the existing classification:
+
+year.start <- 1985 # first year of the old classification to update
+year.end <- 2014 # last year of the old classification to update
+
+y.new <- 2016 # year to add to the classification
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+for(latc in lat.used){
+ #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+ for(lonc in lon.used){
+ pos.latc <- which(lat.used==latc)
+ pos.lonc <- which(lon.used==lonc)
+
+ #cat(paste0("Adding classification at grid point: ",(pos.latc-1)*n.lon.used + pos.lonc ,"/", n.grid.points, " "), "\r")
+ print(paste0("lat: ",latc," lon: ", lonc))
+
+ WTs <- read.table(file=paste0(workdir,"/txt/",rean.name,"/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),stringsAsFactors=FALSE, header=TRUE) # load the old WTs
+
+ WTs_new <- read.table(file=paste0(workdir,"/txt/",rean.name,"/",y.new,"/WT_year_",y.new,"_lat_",latc,"_lon_",lonc,".txt"),stringsAsFactors=FALSE, header=TRUE) # load the WTs of the new year
+
+ WTs_all <- rbind(WTs, WTs_new)
+
+ write.table(WTs_all, file=paste0(workdir,"/txt/",rean.name,"/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+
+ rm(WTs_all)
+
+ }
+}
+
+# when you have finished inserting all the new years, you have to re-run the previous for block again, to re-create the updated WTs.RData!!!
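+
+# A disabled sketch: quick look at the final array written above by the first postproc block:
+if(FALSE){
+ load(paste0(workdir,"/txt/",rean.name,"/WTs.RData")) # loads 'WTs'
+ print(dim(WTs)) # [n.years, 365, n.lat, n.lon]
+}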
+ + + + + +} # close if on postproc diff --git a/bash/.Rhistory b/bash/.Rhistory new file mode 100644 index 0000000000000000000000000000000000000000..d4718b6fbc6906220cf4052b6f681f3c5cb80d67 --- /dev/null +++ b/bash/.Rhistory @@ -0,0 +1,2 @@ +q() +n diff --git a/bash/.directory b/bash/.directory new file mode 100644 index 0000000000000000000000000000000000000000..4063b83af2e55ea734ea370010265002115961df --- /dev/null +++ b/bash/.directory @@ -0,0 +1,4 @@ +[Dolphin] +Timestamp=2016,6,2,11,24,59 +Version=3 +ViewMode=2 diff --git a/bash/check_ECMWFS4_psl_6hourly.txt b/bash/check_ECMWFS4_psl_6hourly.txt new file mode 100644 index 0000000000000000000000000000000000000000..d7d45ceb70ebf97d92cdc760f06ef342da150606 --- /dev/null +++ b/bash/check_ECMWFS4_psl_6hourly.txt @@ -0,0 +1,435 @@ +Checked variable: psl +Path: /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/ +Extension: .nc +Checked period: 1981-2015 +>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<< +Latitude denomination: latitude +Number of latitude values: 181 +Longitude denomination: longitude +Number of longitude values: 360 +Ensemble denomination: ensemble +Number of ensemble values: 15 +Alternative number of ensemble values: 51 +Lead-time denonomination: time +Number of lead-times: 216 + >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< +psl_19810101.nc : >>>>> Missing file <<<<< +psl_19810201.nc : >>>>> Missing file <<<<< +psl_19810301.nc : >>>>> Missing file <<<<< +psl_19810401.nc : >>>>> Missing file <<<<< +psl_19810501.nc : >>>>> Missing file <<<<< +psl_19810601.nc : >>>>> Missing file <<<<< +psl_19810701.nc : >>>>> Missing file <<<<< +psl_19810801.nc : >>>>> Missing file <<<<< +psl_19810901.nc : >>>>> Missing file <<<<< +psl_19811001.nc : >>>>> Missing file <<<<< +psl_19811101.nc : >>>>> Missing file <<<<< +psl_19811201.nc : >>>>> Missing file <<<<< +psl_19820101.nc : >>>>> Missing file <<<<< +psl_19820201.nc : >>>>> Missing file <<<<< +psl_19820301.nc : >>>>> Missing file <<<<< +psl_19820401.nc : >>>>> Missing file <<<<< +psl_19820501.nc : >>>>> Missing file <<<<< +psl_19820601.nc : >>>>> Missing file <<<<< +psl_19820701.nc : >>>>> Missing file <<<<< +psl_19820801.nc : >>>>> Missing file <<<<< +psl_19820901.nc : >>>>> Missing file <<<<< +psl_19821001.nc : >>>>> Missing file <<<<< +psl_19821101.nc : >>>>> Missing file <<<<< +psl_19821201.nc : >>>>> Missing file <<<<< +psl_19830101.nc : >>>>> Missing file <<<<< +psl_19830201.nc : >>>>> Missing file <<<<< +psl_19830301.nc : >>>>> Missing file <<<<< +psl_19830401.nc : >>>>> Missing file <<<<< +psl_19830501.nc : >>>>> Missing file <<<<< +psl_19830601.nc : >>>>> Missing file <<<<< +psl_19830701.nc : >>>>> Missing file <<<<< +psl_19830801.nc : >>>>> Missing file <<<<< +psl_19830901.nc : >>>>> Missing file <<<<< +psl_19831001.nc : >>>>> Missing file <<<<< +psl_19831101.nc : >>>>> Missing file <<<<< +psl_19831201.nc : >>>>> Missing file <<<<< +psl_19840101.nc : >>>>> Missing file <<<<< +psl_19840201.nc : >>>>> Missing file <<<<< +psl_19840301.nc : >>>>> Missing file <<<<< +psl_19840401.nc : >>>>> Missing file <<<<< +psl_19840501.nc : >>>>> Missing file <<<<< +psl_19840601.nc : >>>>> Missing file <<<<< +psl_19840701.nc : >>>>> Missing file <<<<< +psl_19840801.nc : >>>>> Missing file <<<<< +psl_19840901.nc : >>>>> Missing file <<<<< +psl_19841001.nc : >>>>> Missing file <<<<< +psl_19841101.nc : >>>>> Missing file <<<<< +psl_19841201.nc : >>>>> Missing file <<<<< +psl_19850101.nc : >>>>> Missing file <<<<< +psl_19850201.nc : >>>>> Missing file <<<<< 
+psl_19850301.nc : >>>>> Missing file <<<<< +psl_19850401.nc : >>>>> Missing file <<<<< +psl_19850501.nc : >>>>> Missing file <<<<< +psl_19850601.nc : >>>>> Missing file <<<<< +psl_19850701.nc : >>>>> Missing file <<<<< +psl_19850801.nc : >>>>> Missing file <<<<< +psl_19850901.nc : >>>>> Missing file <<<<< +psl_19851001.nc : >>>>> Missing file <<<<< +psl_19851101.nc : >>>>> Missing file <<<<< +psl_19851201.nc : >>>>> Missing file <<<<< +psl_19860101.nc : >>>>> Missing file <<<<< +psl_19860201.nc : >>>>> Missing file <<<<< +psl_19860301.nc : >>>>> Missing file <<<<< +psl_19860401.nc : >>>>> Missing file <<<<< +psl_19860501.nc : >>>>> Missing file <<<<< +psl_19860601.nc : >>>>> Missing file <<<<< +psl_19860701.nc : >>>>> Missing file <<<<< +psl_19860801.nc : >>>>> Missing file <<<<< +psl_19860901.nc : >>>>> Missing file <<<<< +psl_19861001.nc : >>>>> Missing file <<<<< +psl_19861101.nc : >>>>> Missing file <<<<< +psl_19861201.nc : >>>>> Missing file <<<<< +psl_19870101.nc : >>>>> Missing file <<<<< +psl_19870201.nc : >>>>> Missing file <<<<< +psl_19870301.nc : >>>>> Missing file <<<<< +psl_19870401.nc : >>>>> Missing file <<<<< +psl_19870501.nc : >>>>> Missing file <<<<< +psl_19870601.nc : >>>>> Missing file <<<<< +psl_19870701.nc : >>>>> Missing file <<<<< +psl_19870801.nc : >>>>> Missing file <<<<< +psl_19870901.nc : >>>>> Missing file <<<<< +psl_19871001.nc : >>>>> Missing file <<<<< +psl_19871101.nc : >>>>> Missing file <<<<< +psl_19871201.nc : >>>>> Missing file <<<<< +psl_19880101.nc : >>>>> Missing file <<<<< +psl_19880201.nc : >>>>> Missing file <<<<< +psl_19880301.nc : >>>>> Missing file <<<<< +psl_19880401.nc : >>>>> Missing file <<<<< +psl_19880501.nc : >>>>> Missing file <<<<< +psl_19880601.nc : >>>>> Missing file <<<<< +psl_19880701.nc : >>>>> Missing file <<<<< +psl_19880801.nc : >>>>> Missing file <<<<< +psl_19880901.nc : >>>>> Missing file <<<<< +psl_19881001.nc : >>>>> Missing file <<<<< +psl_19881101.nc : >>>>> Missing file <<<<< +psl_19881201.nc : >>>>> Missing file <<<<< +psl_19890101.nc : >>>>> Missing file <<<<< +psl_19890201.nc : >>>>> Missing file <<<<< +psl_19890301.nc : >>>>> Missing file <<<<< +psl_19890401.nc : >>>>> Missing file <<<<< +psl_19890501.nc : >>>>> Missing file <<<<< +psl_19890601.nc : >>>>> Missing file <<<<< +psl_19890701.nc : >>>>> Missing file <<<<< +psl_19890801.nc : >>>>> Missing file <<<<< +psl_19890901.nc : >>>>> Missing file <<<<< +psl_19891001.nc : >>>>> Missing file <<<<< +psl_19891101.nc : >>>>> Missing file <<<<< +psl_19891201.nc : >>>>> Missing file <<<<< +psl_19900101.nc : >>>>> Missing file <<<<< +psl_19900201.nc : >>>>> Missing file <<<<< +psl_19900301.nc : >>>>> Missing file <<<<< +psl_19900401.nc : >>>>> Missing file <<<<< +psl_19900501.nc : >>>>> Missing file <<<<< +psl_19900601.nc : >>>>> Missing file <<<<< +psl_19900701.nc : >>>>> Missing file <<<<< +psl_19900801.nc : >>>>> Missing file <<<<< +psl_19900901.nc : >>>>> Missing file <<<<< +psl_19901001.nc : >>>>> Missing file <<<<< +psl_19901101.nc : >>>>> Missing file <<<<< +psl_19901201.nc : >>>>> Missing file <<<<< +psl_19910101.nc : >>>>> Missing file <<<<< +psl_19910201.nc : >>>>> Missing file <<<<< +psl_19910301.nc : >>>>> Missing file <<<<< +psl_19910401.nc : >>>>> Missing file <<<<< +psl_19910501.nc : >>>>> Missing file <<<<< +psl_19910601.nc : >>>>> Missing file <<<<< +psl_19910701.nc : >>>>> Missing file <<<<< +psl_19910801.nc : >>>>> Missing file <<<<< +psl_19910901.nc : >>>>> Missing file <<<<< +psl_19911001.nc : >>>>> Missing file <<<<< +psl_19911101.nc : >>>>> Missing 
file <<<<< +psl_19911201.nc : >>>>> Missing file <<<<< +psl_19920101.nc : >>>>> Missing file <<<<< +psl_19920201.nc : >>>>> Missing file <<<<< +psl_19920301.nc : >>>>> Missing file <<<<< +psl_19920401.nc : >>>>> Missing file <<<<< +psl_19920501.nc : >>>>> Missing file <<<<< +psl_19920601.nc : >>>>> Missing file <<<<< +psl_19920701.nc : >>>>> Missing file <<<<< +psl_19920801.nc : >>>>> Missing file <<<<< +psl_19920901.nc : >>>>> Missing file <<<<< +psl_19921001.nc : >>>>> Missing file <<<<< +psl_19921101.nc : >>>>> Missing file <<<<< +psl_19921201.nc : >>>>> Missing file <<<<< +psl_19930101.nc : >>>>> Missing file <<<<< +psl_19930201.nc : >>>>> Missing file <<<<< +psl_19930301.nc : >>>>> Missing file <<<<< +psl_19930401.nc : >>>>> Missing file <<<<< +psl_19930501.nc : >>>>> Missing file <<<<< +psl_19930601.nc : >>>>> Missing file <<<<< +psl_19930701.nc : >>>>> Missing file <<<<< +psl_19930801.nc : >>>>> Missing file <<<<< +psl_19930901.nc : >>>>> Missing file <<<<< +psl_19931001.nc : >>>>> Missing file <<<<< +psl_19931101.nc : >>>>> Missing file <<<<< +psl_19931201.nc : >>>>> Missing file <<<<< +psl_19940101.nc : >>>>> Missing file <<<<< +psl_19940201.nc : >>>>> Missing file <<<<< +psl_19940301.nc : >>>>> Missing file <<<<< +psl_19940401.nc : >>>>> Missing file <<<<< +psl_19940501.nc : >>>>> Missing file <<<<< +psl_19940601.nc : >>>>> Missing file <<<<< +psl_19940701.nc : >>>>> Missing file <<<<< +psl_19940801.nc : >>>>> Missing file <<<<< +psl_19940901.nc : >>>>> Missing file <<<<< +psl_19941001.nc : >>>>> Missing file <<<<< +psl_19941101.nc : >>>>> Missing file <<<<< +psl_19941201.nc : >>>>> Missing file <<<<< +psl_19950101.nc : >>>>> Missing file <<<<< +psl_19950201.nc : >>>>> Missing file <<<<< +psl_19950301.nc : >>>>> Missing file <<<<< +psl_19950401.nc : >>>>> Missing file <<<<< +psl_19950501.nc : >>>>> Missing file <<<<< +psl_19950601.nc : >>>>> Missing file <<<<< +psl_19950701.nc : >>>>> Missing file <<<<< +psl_19950801.nc : >>>>> Missing file <<<<< +psl_19950901.nc : >>>>> Missing file <<<<< +psl_19951001.nc : >>>>> Missing file <<<<< +psl_19951101.nc : >>>>> Missing file <<<<< +psl_19951201.nc : >>>>> Missing file <<<<< +psl_19960101.nc : >>>>> Missing file <<<<< +psl_19960201.nc : >>>>> Missing file <<<<< +psl_19960301.nc : >>>>> Missing file <<<<< +psl_19960401.nc : >>>>> Missing file <<<<< +psl_19960501.nc : >>>>> Missing file <<<<< +psl_19960601.nc : >>>>> Missing file <<<<< +psl_19960701.nc : >>>>> Missing file <<<<< +psl_19960801.nc : >>>>> Missing file <<<<< +psl_19960901.nc : >>>>> Missing file <<<<< +psl_19961001.nc : >>>>> Missing file <<<<< +psl_19961101.nc : >>>>> Missing file <<<<< +psl_19961201.nc : >>>>> Missing file <<<<< +psl_19970101.nc : >>>>> Missing file <<<<< +psl_19970201.nc : >>>>> Missing file <<<<< +psl_19970301.nc : >>>>> Missing file <<<<< +psl_19970401.nc : >>>>> Missing file <<<<< +psl_19970501.nc : >>>>> Missing file <<<<< +psl_19970601.nc : >>>>> Missing file <<<<< +psl_19970701.nc : >>>>> Missing file <<<<< +psl_19970801.nc : >>>>> Missing file <<<<< +psl_19970901.nc : >>>>> Missing file <<<<< +psl_19971001.nc : >>>>> Missing file <<<<< +psl_19971101.nc : >>>>> Missing file <<<<< +psl_19971201.nc : >>>>> Missing file <<<<< +psl_19980101.nc : >>>>> Missing file <<<<< +psl_19980201.nc : >>>>> Missing file <<<<< +psl_19980301.nc : >>>>> Missing file <<<<< +psl_19980401.nc : >>>>> Missing file <<<<< +psl_19980501.nc : >>>>> Missing file <<<<< +psl_19980601.nc : >>>>> Missing file <<<<< +psl_19980701.nc : >>>>> Missing file <<<<< +psl_19980801.nc : 
>>>>> Missing file <<<<< +psl_19980901.nc : >>>>> Missing file <<<<< +psl_19981001.nc : >>>>> Missing file <<<<< +psl_19981101.nc : >>>>> Missing file <<<<< +psl_19981201.nc : >>>>> Missing file <<<<< +psl_19990101.nc : >>>>> Missing file <<<<< +psl_19990201.nc : >>>>> Missing file <<<<< +psl_19990301.nc : >>>>> Missing file <<<<< +psl_19990401.nc : >>>>> Missing file <<<<< +psl_19990501.nc : >>>>> Missing file <<<<< +psl_19990601.nc : >>>>> Missing file <<<<< +psl_19990701.nc : >>>>> Missing file <<<<< +psl_19990801.nc : >>>>> Missing file <<<<< +psl_19990901.nc : >>>>> Missing file <<<<< +psl_19991001.nc : >>>>> Missing file <<<<< +psl_19991101.nc : >>>>> Missing file <<<<< +psl_19991201.nc : >>>>> Missing file <<<<< +psl_20000101.nc : >>>>> Missing file <<<<< +psl_20000201.nc : >>>>> Missing file <<<<< +psl_20000301.nc : >>>>> Missing file <<<<< +psl_20000401.nc : >>>>> Missing file <<<<< +psl_20000501.nc : >>>>> Missing file <<<<< +psl_20000601.nc : >>>>> Missing file <<<<< +psl_20000701.nc : >>>>> Missing file <<<<< +psl_20000801.nc : >>>>> Missing file <<<<< +psl_20000901.nc : >>>>> Missing file <<<<< +psl_20001001.nc : >>>>> Missing file <<<<< +psl_20001101.nc : >>>>> Missing file <<<<< +psl_20001201.nc : >>>>> Missing file <<<<< +psl_20010101.nc : >>>>> Missing file <<<<< +psl_20010201.nc : >>>>> Missing file <<<<< +psl_20010301.nc : >>>>> Missing file <<<<< +psl_20010401.nc : >>>>> Missing file <<<<< +psl_20010501.nc : >>>>> Missing file <<<<< +psl_20010601.nc : >>>>> Missing file <<<<< +psl_20010701.nc : >>>>> Missing file <<<<< +psl_20010801.nc : >>>>> Missing file <<<<< +psl_20010901.nc : >>>>> Missing file <<<<< +psl_20011001.nc : >>>>> Missing file <<<<< +psl_20011101.nc : >>>>> Missing file <<<<< +psl_20011201.nc : >>>>> Missing file <<<<< +psl_20020101.nc : >>>>> Missing file <<<<< +psl_20020201.nc : >>>>> Missing file <<<<< +psl_20020301.nc : >>>>> Missing file <<<<< +psl_20020401.nc : >>>>> Missing file <<<<< +psl_20020501.nc : >>>>> Missing file <<<<< +psl_20020601.nc : >>>>> Missing file <<<<< +psl_20020701.nc : >>>>> Missing file <<<<< +psl_20020801.nc : >>>>> Missing file <<<<< +psl_20020901.nc : >>>>> Missing file <<<<< +psl_20021001.nc : >>>>> Missing file <<<<< +psl_20021101.nc : >>>>> Missing file <<<<< +psl_20021201.nc : >>>>> Missing file <<<<< +psl_20030101.nc : >>>>> Missing file <<<<< +psl_20030201.nc : >>>>> Missing file <<<<< +psl_20030301.nc : >>>>> Missing file <<<<< +psl_20030401.nc : >>>>> Missing file <<<<< +psl_20030501.nc : >>>>> Missing file <<<<< +psl_20030601.nc : >>>>> Missing file <<<<< +psl_20030701.nc : >>>>> Missing file <<<<< +psl_20030801.nc : >>>>> Missing file <<<<< +psl_20030901.nc : >>>>> Missing file <<<<< +psl_20031001.nc : >>>>> Missing file <<<<< +psl_20031101.nc : >>>>> Missing file <<<<< +psl_20031201.nc : >>>>> Missing file <<<<< +psl_20040101.nc : >>>>> Missing file <<<<< +psl_20040201.nc : >>>>> Missing file <<<<< +psl_20040301.nc : >>>>> Missing file <<<<< +psl_20040401.nc : >>>>> Missing file <<<<< +psl_20040501.nc : >>>>> Missing file <<<<< +psl_20040601.nc : >>>>> Missing file <<<<< +psl_20040701.nc : >>>>> Missing file <<<<< +psl_20040801.nc : >>>>> Missing file <<<<< +psl_20040901.nc : >>>>> Missing file <<<<< +psl_20041001.nc : >>>>> Missing file <<<<< +psl_20041101.nc : >>>>> Missing file <<<<< +psl_20041201.nc : >>>>> Missing file <<<<< +psl_20050101.nc : >>>>> Missing file <<<<< +psl_20050201.nc : >>>>> Missing file <<<<< +psl_20050301.nc : >>>>> Missing file <<<<< +psl_20050401.nc : >>>>> Missing file <<<<< 
+psl_20050501.nc : >>>>> Missing file <<<<< +psl_20050601.nc : >>>>> Missing file <<<<< +psl_20050701.nc : >>>>> Missing file <<<<< +psl_20050801.nc : >>>>> Missing file <<<<< +psl_20050901.nc : >>>>> Missing file <<<<< +psl_20051001.nc : >>>>> Missing file <<<<< +psl_20051101.nc : >>>>> Missing file <<<<< +psl_20051201.nc : >>>>> Missing file <<<<< +psl_20060101.nc : >>>>> Missing file <<<<< +psl_20060201.nc : >>>>> Missing file <<<<< +psl_20060301.nc : >>>>> Missing file <<<<< +psl_20060401.nc : >>>>> Missing file <<<<< +psl_20060501.nc : >>>>> Missing file <<<<< +psl_20060601.nc : >>>>> Missing file <<<<< +psl_20060701.nc : >>>>> Missing file <<<<< +psl_20060801.nc : >>>>> Missing file <<<<< +psl_20060901.nc : >>>>> Missing file <<<<< +psl_20061001.nc : >>>>> Missing file <<<<< +psl_20061101.nc : >>>>> Missing file <<<<< +psl_20061201.nc : >>>>> Missing file <<<<< +psl_20070101.nc : >>>>> Missing file <<<<< +psl_20070201.nc : >>>>> Missing file <<<<< +psl_20070301.nc : >>>>> Missing file <<<<< +psl_20070401.nc : >>>>> Missing file <<<<< +psl_20070501.nc : >>>>> Missing file <<<<< +psl_20070601.nc : >>>>> Missing file <<<<< +psl_20070701.nc : >>>>> Missing file <<<<< +psl_20070801.nc : >>>>> Missing file <<<<< +psl_20070901.nc : >>>>> Missing file <<<<< +psl_20071001.nc : >>>>> Missing file <<<<< +psl_20071101.nc : >>>>> Missing file <<<<< +psl_20071201.nc : >>>>> Missing file <<<<< +psl_20080101.nc : >>>>> Missing file <<<<< +psl_20080201.nc : >>>>> Missing file <<<<< +psl_20080301.nc : >>>>> Missing file <<<<< +psl_20080401.nc : >>>>> Missing file <<<<< +psl_20080501.nc : >>>>> Missing file <<<<< +psl_20080601.nc : >>>>> Missing file <<<<< +psl_20080701.nc : >>>>> Missing file <<<<< +psl_20080801.nc : >>>>> Missing file <<<<< +psl_20080901.nc : >>>>> Missing file <<<<< +psl_20081001.nc : >>>>> Missing file <<<<< +psl_20081101.nc : >>>>> Missing file <<<<< +psl_20081201.nc : >>>>> Missing file <<<<< +psl_20090101.nc : >>>>> Missing file <<<<< +psl_20090201.nc : >>>>> Missing file <<<<< +psl_20090301.nc : >>>>> Missing file <<<<< +psl_20090401.nc : >>>>> Missing file <<<<< +psl_20090501.nc : >>>>> Missing file <<<<< +psl_20090601.nc : >>>>> Missing file <<<<< +psl_20090701.nc : >>>>> Missing file <<<<< +psl_20090801.nc : >>>>> Missing file <<<<< +psl_20090901.nc : >>>>> Missing file <<<<< +psl_20091001.nc : >>>>> Missing file <<<<< +psl_20091101.nc : >>>>> Missing file <<<<< +psl_20091201.nc : >>>>> Missing file <<<<< +psl_20100101.nc : >>>>> Missing file <<<<< +psl_20100201.nc : >>>>> Missing file <<<<< +psl_20100301.nc : >>>>> Missing file <<<<< +psl_20100401.nc : >>>>> Missing file <<<<< +psl_20100501.nc : >>>>> Missing file <<<<< +psl_20100601.nc : >>>>> Missing file <<<<< +psl_20100701.nc : >>>>> Missing file <<<<< +psl_20100801.nc : >>>>> Missing file <<<<< +psl_20100901.nc : >>>>> Missing file <<<<< +psl_20101001.nc : >>>>> Missing file <<<<< +psl_20101101.nc : >>>>> Missing file <<<<< +psl_20101201.nc : >>>>> Missing file <<<<< +psl_20110101.nc : >>>>> Missing file <<<<< +psl_20110201.nc : >>>>> Missing file <<<<< +psl_20110301.nc : >>>>> Missing file <<<<< +psl_20110401.nc : >>>>> Missing file <<<<< +psl_20110501.nc : >>>>> Missing file <<<<< +psl_20110601.nc : >>>>> Missing file <<<<< +psl_20110701.nc : >>>>> Missing file <<<<< +psl_20110801.nc : >>>>> Missing file <<<<< +psl_20110901.nc : >>>>> Missing file <<<<< +psl_20111001.nc : >>>>> Missing file <<<<< +psl_20111101.nc : >>>>> Missing file <<<<< +psl_20111201.nc : >>>>> Missing file <<<<< +psl_20120101.nc : >>>>> Missing 
file <<<<< +psl_20120201.nc : >>>>> Missing file <<<<< +psl_20120301.nc : >>>>> Missing file <<<<< +psl_20120401.nc : >>>>> Missing file <<<<< +psl_20120501.nc : >>>>> Missing file <<<<< +psl_20120601.nc : >>>>> Missing file <<<<< +psl_20120701.nc : >>>>> Missing file <<<<< +psl_20120801.nc : >>>>> Missing file <<<<< +psl_20120901.nc : >>>>> Missing file <<<<< +psl_20121001.nc : >>>>> Missing file <<<<< +psl_20121101.nc : >>>>> Missing file <<<<< +psl_20121201.nc : >>>>> Missing file <<<<< +psl_20130101.nc : >>>>> Missing file <<<<< +psl_20130201.nc : >>>>> Missing file <<<<< +psl_20130301.nc : >>>>> Missing file <<<<< +psl_20130401.nc : >>>>> Missing file <<<<< +psl_20130501.nc : >>>>> Missing file <<<<< +psl_20130601.nc : >>>>> Missing file <<<<< +psl_20130701.nc : >>>>> Missing file <<<<< +psl_20130801.nc : >>>>> Missing file <<<<< +psl_20130901.nc : >>>>> Missing file <<<<< +psl_20131001.nc : >>>>> Missing file <<<<< +psl_20131101.nc : >>>>> Missing file <<<<< +psl_20131201.nc : >>>>> Missing file <<<<< +psl_20140101.nc : >>>>> Missing file <<<<< +psl_20140201.nc : >>>>> Missing file <<<<< +psl_20140301.nc : >>>>> Missing file <<<<< +psl_20140401.nc : >>>>> Missing file <<<<< +psl_20140501.nc : >>>>> Missing file <<<<< +psl_20140601.nc : >>>>> Missing file <<<<< +psl_20140701.nc : >>>>> Missing file <<<<< +psl_20140801.nc : >>>>> Missing file <<<<< +psl_20140901.nc : >>>>> Missing file <<<<< +psl_20141001.nc : >>>>> Missing file <<<<< +psl_20141101.nc : >>>>> Missing file <<<<< +psl_20141201.nc : >>>>> Missing file <<<<< +psl_20150101.nc : >>>>> Missing file <<<<< +psl_20150201.nc : >>>>> Missing file <<<<< +psl_20150301.nc : >>>>> Missing file <<<<< +psl_20150401.nc : >>>>> Missing file <<<<< +psl_20150501.nc : >>>>> Missing file <<<<< +psl_20150601.nc : >>>>> Missing file <<<<< +psl_20150701.nc : >>>>> Missing file <<<<< +psl_20150801.nc : >>>>> Missing file <<<<< +psl_20150901.nc : >>>>> Missing file <<<<< +psl_20151001.nc : >>>>> Missing file <<<<< +psl_20151101.nc : >>>>> Missing file <<<<< +psl_20151201.nc : >>>>> Missing file <<<<< diff --git a/bash/check_ECMWFS4_sfcWind_6hourly.txt b/bash/check_ECMWFS4_sfcWind_6hourly.txt new file mode 100644 index 0000000000000000000000000000000000000000..213ee2a6dbc81158f145746abb1e8ce2bb6bec7c --- /dev/null +++ b/bash/check_ECMWFS4_sfcWind_6hourly.txt @@ -0,0 +1,783 @@ +Checked variable: sfcWind +Path: /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/ +Extension: .nc +Checked period: 1981-2015 +>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<< +Latitude denomination: latitude +Number of latitude values: 181 +Longitude denomination: longitude +Number of longitude values: 360 +Ensemble denomination: ensemble +Number of ensemble values: 15 +Alternative number of ensemble values: 51 +Lead-time denonomination: time +Number of lead-times: 216 + >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< +sfcWind_19810101.nc : there is no dimension called ensemble inside this file +sfcWind_19810201.nc : there is no dimension called latitude inside this file +sfcWind_19810201.nc : there is no dimension called longitude inside this file +sfcWind_19810201.nc : there is no dimension called ensemble inside this file +sfcWind_19810201.nc : time dimension has 10812 values instead of 216 +sfcWind_19810301.nc : there is no dimension called ensemble inside this file +sfcWind_19810401.nc : there is no dimension called ensemble inside this file +sfcWind_19810501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19810501.nc : 
longitude dimension has 512 values instead of 360 +sfcWind_19810501.nc : there is no dimension called ensemble inside this file +sfcWind_19810501.nc : there is no dimension called time inside this file +sfcWind_19810601.nc : there is no dimension called ensemble inside this file +sfcWind_19810701.nc : there is no dimension called ensemble inside this file +sfcWind_19810801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19810801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19810801.nc : there is no dimension called ensemble inside this file +sfcWind_19810801.nc : time dimension has 212 values instead of 216 +sfcWind_19810901.nc : there is no dimension called ensemble inside this file +sfcWind_19811001.nc : there is no dimension called ensemble inside this file +sfcWind_19811101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19811101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19811101.nc : there is no dimension called ensemble inside this file +sfcWind_19811101.nc : there is no dimension called time inside this file +sfcWind_19811201.nc : there is no dimension called ensemble inside this file +sfcWind_19820101.nc : there is no dimension called ensemble inside this file +sfcWind_19820201.nc : there is no dimension called latitude inside this file +sfcWind_19820201.nc : there is no dimension called longitude inside this file +sfcWind_19820201.nc : there is no dimension called ensemble inside this file +sfcWind_19820201.nc : time dimension has 10812 values instead of 216 +sfcWind_19820301.nc : there is no dimension called ensemble inside this file +sfcWind_19820401.nc : there is no dimension called ensemble inside this file +sfcWind_19820501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19820501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19820501.nc : there is no dimension called ensemble inside this file +sfcWind_19820501.nc : there is no dimension called time inside this file +sfcWind_19820601.nc : there is no dimension called ensemble inside this file +sfcWind_19820701.nc : there is no dimension called ensemble inside this file +sfcWind_19820701.nc : time dimension has 31 values instead of 216 +sfcWind_19820801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19820801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19820801.nc : there is no dimension called ensemble inside this file +sfcWind_19820801.nc : time dimension has 212 values instead of 216 +sfcWind_19820901.nc : there is no dimension called ensemble inside this file +sfcWind_19821001.nc : there is no dimension called ensemble inside this file +sfcWind_19821101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19821101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19821101.nc : there is no dimension called ensemble inside this file +sfcWind_19821101.nc : there is no dimension called time inside this file +sfcWind_19821201.nc : there is no dimension called ensemble inside this file +sfcWind_19830101.nc : there is no dimension called ensemble inside this file +sfcWind_19830201.nc : there is no dimension called latitude inside this file +sfcWind_19830201.nc : there is no dimension called longitude inside this file +sfcWind_19830201.nc : there is no dimension called ensemble inside this file +sfcWind_19830201.nc : time dimension has 10812 values instead of 216 +sfcWind_19830301.nc : there is no dimension called ensemble inside this file +sfcWind_19830401.nc : there is 
no dimension called ensemble inside this file
+sfcWind_19830501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830501.nc : there is no dimension called ensemble inside this file
+sfcWind_19830501.nc : there is no dimension called time inside this file
+sfcWind_19830601.nc : there is no dimension called ensemble inside this file
+sfcWind_19830701.nc : there is no dimension called ensemble inside this file
+sfcWind_19830801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830801.nc : there is no dimension called ensemble inside this file
+sfcWind_19830801.nc : time dimension has 213 values instead of 216
+sfcWind_19830901.nc : there is no dimension called ensemble inside this file
+sfcWind_19831001.nc : there is no dimension called ensemble inside this file
+sfcWind_19831101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19831101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19831101.nc : there is no dimension called ensemble inside this file
+sfcWind_19831101.nc : there is no dimension called time inside this file
+sfcWind_19831201.nc : there is no dimension called ensemble inside this file
+sfcWind_19840101.nc : there is no dimension called ensemble inside this file
+sfcWind_19840101.nc : time dimension has 9 values instead of 216
+sfcWind_19840201.nc : there is no dimension called latitude inside this file
+sfcWind_19840201.nc : there is no dimension called longitude inside this file
+sfcWind_19840201.nc : there is no dimension called ensemble inside this file
+sfcWind_19840201.nc : time dimension has 10863 values instead of 216
+sfcWind_19840301.nc : there is no dimension called ensemble inside this file
+sfcWind_19840401.nc : there is no dimension called ensemble inside this file
+sfcWind_19840501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840501.nc : there is no dimension called ensemble inside this file
+sfcWind_19840501.nc : there is no dimension called time inside this file
+sfcWind_19840601.nc : there is no dimension called ensemble inside this file
+sfcWind_19840701.nc : there is no dimension called ensemble inside this file
+sfcWind_19840801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840801.nc : there is no dimension called ensemble inside this file
+sfcWind_19840801.nc : time dimension has 212 values instead of 216
+sfcWind_19840901.nc : there is no dimension called ensemble inside this file
+sfcWind_19841001.nc : there is no dimension called ensemble inside this file
+sfcWind_19841101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19841101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19841101.nc : there is no dimension called ensemble inside this file
+sfcWind_19841101.nc : there is no dimension called time inside this file
+sfcWind_19841201.nc : there is no dimension called ensemble inside this file
+sfcWind_19850101.nc : there is no dimension called ensemble inside this file
+sfcWind_19850201.nc : there is no dimension called latitude inside this file
+sfcWind_19850201.nc : there is no dimension called longitude inside this file
+sfcWind_19850201.nc : there is no dimension called ensemble inside this file
+sfcWind_19850201.nc : time dimension has 10812 values instead of 216
+sfcWind_19850301.nc : there is no dimension called ensemble inside this file
+sfcWind_19850401.nc : there is no dimension called ensemble inside this file
+sfcWind_19850501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19850501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19850501.nc : there is no dimension called ensemble inside this file
+sfcWind_19850501.nc : there is no dimension called time inside this file
+sfcWind_19850601.nc : there is no dimension called latitude inside this file
+sfcWind_19850601.nc : there is no dimension called longitude inside this file
+sfcWind_19850601.nc : there is no dimension called ensemble inside this file
+sfcWind_19850601.nc : there is no dimension called time inside this file
+sfcWind_19850701.nc : there is no dimension called ensemble inside this file
+sfcWind_19850801.nc : >>>>> Missing file <<<<<
+sfcWind_19850901.nc : there is no dimension called ensemble inside this file
+sfcWind_19850901.nc : time dimension has 226 values instead of 216
+sfcWind_19851001.nc : there is no dimension called ensemble inside this file
+sfcWind_19851101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19851101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19851101.nc : there is no dimension called ensemble inside this file
+sfcWind_19851101.nc : time dimension has 214 values instead of 216
+sfcWind_19851201.nc : there is no dimension called ensemble inside this file
+sfcWind_19860101.nc : there is no dimension called ensemble inside this file
+sfcWind_19860101.nc : time dimension has 23 values instead of 216
+sfcWind_19860201.nc : there is no dimension called latitude inside this file
+sfcWind_19860201.nc : there is no dimension called longitude inside this file
+sfcWind_19860201.nc : there is no dimension called ensemble inside this file
+sfcWind_19860201.nc : time dimension has 10812 values instead of 216
+sfcWind_19860301.nc : there is no dimension called ensemble inside this file
+sfcWind_19860301.nc : time dimension has 285 values instead of 216
+sfcWind_19860401.nc : there is no dimension called ensemble inside this file
+sfcWind_19860501.nc : >>>>> Missing file <<<<<
+sfcWind_19860601.nc : there is no dimension called ensemble inside this file
+sfcWind_19860701.nc : there is no dimension called ensemble inside this file
+sfcWind_19860801.nc : >>>>> Missing file <<<<<
+sfcWind_19860901.nc : there is no dimension called ensemble inside this file
+sfcWind_19861001.nc : there is no dimension called ensemble inside this file
+sfcWind_19861001.nc : time dimension has 94 values instead of 216
+sfcWind_19861101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19861101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19861101.nc : there is no dimension called ensemble inside this file
+sfcWind_19861101.nc : time dimension has 214 values instead of 216
+sfcWind_19861201.nc : there is no dimension called ensemble inside this file
+sfcWind_19870101.nc : there is no dimension called ensemble inside this file
+sfcWind_19870201.nc : there is no dimension called latitude inside this file
+sfcWind_19870201.nc : there is no dimension called longitude inside this file
+sfcWind_19870201.nc : there is no dimension called ensemble inside this file
+sfcWind_19870201.nc : time dimension has 10812 values instead of 216
+sfcWind_19870301.nc : there is no dimension called ensemble inside this file
+sfcWind_19870401.nc : there is no dimension called ensemble inside this file
+sfcWind_19870501.nc : >>>>> Missing file <<<<<
+sfcWind_19870601.nc : there is no dimension called ensemble inside this file
+sfcWind_19870701.nc : there is no dimension called ensemble inside this file
+sfcWind_19870801.nc : >>>>> Missing file <<<<<
+sfcWind_19870901.nc : there is no dimension called ensemble inside this file
+sfcWind_19871001.nc : there is no dimension called ensemble inside this file
+sfcWind_19871101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19871101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19871101.nc : there is no dimension called ensemble inside this file
+sfcWind_19871101.nc : time dimension has 214 values instead of 216
+sfcWind_19871201.nc : there is no dimension called ensemble inside this file
+sfcWind_19871201.nc : time dimension has 46 values instead of 216
+sfcWind_19880101.nc : there is no dimension called ensemble inside this file
+sfcWind_19880201.nc : there is no dimension called latitude inside this file
+sfcWind_19880201.nc : there is no dimension called longitude inside this file
+sfcWind_19880201.nc : there is no dimension called ensemble inside this file
+sfcWind_19880201.nc : time dimension has 10863 values instead of 216
+sfcWind_19880301.nc : there is no dimension called ensemble inside this file
+sfcWind_19880401.nc : there is no dimension called ensemble inside this file
+sfcWind_19880501.nc : >>>>> Missing file <<<<<
+sfcWind_19880601.nc : there is no dimension called ensemble inside this file
+sfcWind_19880701.nc : there is no dimension called ensemble inside this file
+sfcWind_19880801.nc : >>>>> Missing file <<<<<
+sfcWind_19880901.nc : there is no dimension called ensemble inside this file
+sfcWind_19881001.nc : there is no dimension called ensemble inside this file
+sfcWind_19881101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19881101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19881101.nc : there is no dimension called ensemble inside this file
+sfcWind_19881101.nc : time dimension has 214 values instead of 216
+sfcWind_19881201.nc : there is no dimension called ensemble inside this file
+sfcWind_19890101.nc : there is no dimension called ensemble inside this file
+sfcWind_19890201.nc : there is no dimension called latitude inside this file
+sfcWind_19890201.nc : there is no dimension called longitude inside this file
+sfcWind_19890201.nc : there is no dimension called ensemble inside this file
+sfcWind_19890201.nc : time dimension has 10812 values instead of 216
+sfcWind_19890301.nc : there is no dimension called ensemble inside this file
+sfcWind_19890401.nc : there is no dimension called ensemble inside this file
+sfcWind_19890501.nc : >>>>> Missing file <<<<<
+sfcWind_19890601.nc : there is no dimension called ensemble inside this file
+sfcWind_19890701.nc : there is no dimension called ensemble inside this file
+sfcWind_19890801.nc : >>>>> Missing file <<<<<
+sfcWind_19890901.nc : there is no dimension called ensemble inside this file
+sfcWind_19891001.nc : there is no dimension called ensemble inside this file
+sfcWind_19891101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19891101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19891101.nc : there is no dimension called ensemble inside this file
+sfcWind_19891101.nc : time dimension has 214 values instead of 216
+sfcWind_19891201.nc : there is no dimension called ensemble inside this file
+sfcWind_19900101.nc : there is no dimension called ensemble inside this file
+sfcWind_19900201.nc : there is no dimension called latitude inside this file
+sfcWind_19900201.nc : there is no dimension called longitude inside this file
+sfcWind_19900201.nc : there is no dimension called ensemble inside this file
+sfcWind_19900201.nc : time dimension has 10812 values instead of 216
+sfcWind_19900301.nc : there is no dimension called ensemble inside this file
+sfcWind_19900401.nc : >>>>> Missing file <<<<<
+sfcWind_19900501.nc : >>>>> Missing file <<<<<
+sfcWind_19900601.nc : there is no dimension called ensemble inside this file
+sfcWind_19900701.nc : >>>>> Missing file <<<<<
+sfcWind_19900801.nc : >>>>> Missing file <<<<<
+sfcWind_19900901.nc : there is no dimension called ensemble inside this file
+sfcWind_19900901.nc : time dimension has 2 values instead of 216
+sfcWind_19901001.nc : there is no dimension called ensemble inside this file
+sfcWind_19901001.nc : time dimension has 120 values instead of 216
+sfcWind_19901101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19901101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19901101.nc : there is no dimension called ensemble inside this file
+sfcWind_19901101.nc : time dimension has 214 values instead of 216
+sfcWind_19901201.nc : there is no dimension called ensemble inside this file
+sfcWind_19910101.nc : there is no dimension called ensemble inside this file
+sfcWind_19910201.nc : there is no dimension called latitude inside this file
+sfcWind_19910201.nc : there is no dimension called longitude inside this file
+sfcWind_19910201.nc : there is no dimension called ensemble inside this file
+sfcWind_19910201.nc : time dimension has 10812 values instead of 216
+sfcWind_19910301.nc : there is no dimension called ensemble inside this file
+sfcWind_19910401.nc : there is no dimension called ensemble inside this file
+sfcWind_19910501.nc : >>>>> Missing file <<<<<
+sfcWind_19910601.nc : there is no dimension called ensemble inside this file
+sfcWind_19910701.nc : there is no dimension called ensemble inside this file
+sfcWind_19910801.nc : >>>>> Missing file <<<<<
+sfcWind_19910901.nc : there is no dimension called ensemble inside this file
+sfcWind_19911001.nc : there is no dimension called ensemble inside this file
+sfcWind_19911001.nc : time dimension has 117 values instead of 216
+sfcWind_19911101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19911101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19911101.nc : there is no dimension called ensemble inside this file
+sfcWind_19911101.nc : time dimension has 214 values instead of 216
+sfcWind_19911201.nc : there is no dimension called ensemble inside this file
+sfcWind_19920101.nc : there is no dimension called ensemble inside this file
+sfcWind_19920201.nc : there is no dimension called latitude inside this file
+sfcWind_19920201.nc : there is no dimension called longitude inside this file
+sfcWind_19920201.nc : there is no dimension called ensemble inside this file
+sfcWind_19920201.nc : time dimension has 10863 values instead of 216
+sfcWind_19920301.nc : there is no dimension called ensemble inside this file
+sfcWind_19920401.nc : there is no dimension called ensemble inside this file
+sfcWind_19920501.nc : >>>>> Missing file <<<<<
+sfcWind_19920601.nc : there is no dimension called ensemble inside this file
+sfcWind_19920601.nc : time dimension has 188 values instead of 216
+sfcWind_19920701.nc : there is no dimension called ensemble inside this file
+sfcWind_19920801.nc : >>>>> Missing file <<<<<
+sfcWind_19920901.nc : there is no dimension called ensemble inside this file
+sfcWind_19921001.nc : there is no dimension called ensemble inside this file
+sfcWind_19921101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19921101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19921101.nc : there is no dimension called ensemble inside this file
+sfcWind_19921101.nc : time dimension has 214 values instead of 216
+sfcWind_19921201.nc : there is no dimension called ensemble inside this file
+sfcWind_19930101.nc : there is no dimension called ensemble inside this file
+sfcWind_19930201.nc : there is no dimension called latitude inside this file
+sfcWind_19930201.nc : there is no dimension called longitude inside this file
+sfcWind_19930201.nc : there is no dimension called ensemble inside this file
+sfcWind_19930201.nc : time dimension has 10812 values instead of 216
+sfcWind_19930301.nc : there is no dimension called ensemble inside this file
+sfcWind_19930401.nc : there is no dimension called ensemble inside this file
+sfcWind_19930501.nc : >>>>> Missing file <<<<<
+sfcWind_19930601.nc : there is no dimension called latitude inside this file
+sfcWind_19930601.nc : there is no dimension called longitude inside this file
+sfcWind_19930601.nc : there is no dimension called ensemble inside this file
+sfcWind_19930601.nc : there is no dimension called time inside this file
+sfcWind_19930701.nc : there is no dimension called ensemble inside this file
+sfcWind_19930801.nc : >>>>> Missing file <<<<<
+sfcWind_19930901.nc : there is no dimension called ensemble inside this file
+sfcWind_19931001.nc : there is no dimension called ensemble inside this file
+sfcWind_19931101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19931101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19931101.nc : there is no dimension called ensemble inside this file
+sfcWind_19931101.nc : time dimension has 214 values instead of 216
+sfcWind_19931201.nc : there is no dimension called ensemble inside this file
+sfcWind_19931201.nc : time dimension has 29 values instead of 216
+sfcWind_19940101.nc : there is no dimension called ensemble inside this file
+sfcWind_19940201.nc : there is no dimension called latitude inside this file
+sfcWind_19940201.nc : there is no dimension called longitude inside this file
+sfcWind_19940201.nc : there is no dimension called ensemble inside this file
+sfcWind_19940201.nc : time dimension has 10812 values instead of 216
+sfcWind_19940301.nc : there is no dimension called ensemble inside this file
+sfcWind_19940301.nc : time dimension has 101 values instead of 216
+sfcWind_19940401.nc : there is no dimension called ensemble inside this file
+sfcWind_19940501.nc : >>>>> Missing file <<<<<
+sfcWind_19940601.nc : there is no dimension called ensemble inside this file
+sfcWind_19940701.nc : there is no dimension called ensemble inside this file
+sfcWind_19940801.nc : >>>>> Missing file <<<<<
+sfcWind_19940901.nc : there is no dimension called ensemble inside this file
+sfcWind_19941001.nc : there is no dimension called ensemble inside this file
+sfcWind_19941101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19941101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19941101.nc : there is no dimension called ensemble inside this file
+sfcWind_19941101.nc : time dimension has 214 values instead of 216
+sfcWind_19941201.nc : there is no dimension called ensemble inside this file
+sfcWind_19950101.nc : there is no dimension called ensemble inside this file
+sfcWind_19950101.nc : time dimension has 75 values instead of 216
+sfcWind_19950201.nc : there is no dimension called latitude inside this file
+sfcWind_19950201.nc : there is no dimension called longitude inside this file
+sfcWind_19950201.nc : there is no dimension called ensemble inside this file
+sfcWind_19950201.nc : time dimension has 10812 values instead of 216
+sfcWind_19950301.nc : there is no dimension called ensemble inside this file
+sfcWind_19950401.nc : there is no dimension called ensemble inside this file
+sfcWind_19950401.nc : time dimension has 187 values instead of 216
+sfcWind_19950501.nc : >>>>> Missing file <<<<<
+sfcWind_19950601.nc : there is no dimension called ensemble inside this file
+sfcWind_19950701.nc : there is no dimension called ensemble inside this file
+sfcWind_19950801.nc : >>>>> Missing file <<<<<
+sfcWind_19950901.nc : there is no dimension called ensemble inside this file
+sfcWind_19951001.nc : there is no dimension called ensemble inside this file
+sfcWind_19951101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19951101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19951101.nc : there is no dimension called ensemble inside this file
+sfcWind_19951101.nc : time dimension has 214 values instead of 216
+sfcWind_19951201.nc : there is no dimension called ensemble inside this file
+sfcWind_19960101.nc : there is no dimension called ensemble inside this file
+sfcWind_19960201.nc : there is no dimension called latitude inside this file
+sfcWind_19960201.nc : there is no dimension called longitude inside this file
+sfcWind_19960201.nc : there is no dimension called ensemble inside this file
+sfcWind_19960201.nc : time dimension has 10863 values instead of 216
+sfcWind_19960301.nc : there is no dimension called ensemble inside this file
+sfcWind_19960401.nc : there is no dimension called ensemble inside this file
+sfcWind_19960501.nc : >>>>> Missing file <<<<<
+sfcWind_19960601.nc : there is no dimension called ensemble inside this file
+sfcWind_19960701.nc : there is no dimension called ensemble inside this file
+sfcWind_19960801.nc : >>>>> Missing file <<<<<
+sfcWind_19960901.nc : there is no dimension called ensemble inside this file
+sfcWind_19961001.nc : there is no dimension called ensemble inside this file
+sfcWind_19961101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19961101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19961101.nc : there is no dimension called ensemble inside this file
+sfcWind_19961101.nc : time dimension has 214 values instead of 216
+sfcWind_19961201.nc : there is no dimension called ensemble inside this file
+sfcWind_19970101.nc : there is no dimension called ensemble inside this file
+sfcWind_19970201.nc : there is no dimension called latitude inside this file
+sfcWind_19970201.nc : there is no dimension called longitude inside this file
+sfcWind_19970201.nc : there is no dimension called ensemble inside this file
+sfcWind_19970201.nc : time dimension has 10812 values instead of 216
+sfcWind_19970301.nc : there is no dimension called ensemble inside this file
+sfcWind_19970401.nc : there is no dimension called ensemble inside this file
+sfcWind_19970501.nc : >>>>> Missing file <<<<<
+sfcWind_19970601.nc : there is no dimension called ensemble inside this file
+sfcWind_19970701.nc : there is no dimension called ensemble inside this file
+sfcWind_19970801.nc : >>>>> Missing file <<<<<
+sfcWind_19970901.nc : there is no dimension called ensemble inside this file
+sfcWind_19971001.nc : there is no dimension called ensemble inside this file
+sfcWind_19971101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19971101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19971101.nc : there is no dimension called ensemble inside this file
+sfcWind_19971101.nc : time dimension has 214 values instead of 216
+sfcWind_19971201.nc : there is no dimension called ensemble inside this file
+sfcWind_19980101.nc : there is no dimension called ensemble inside this file
+sfcWind_19980201.nc : there is no dimension called latitude inside this file
+sfcWind_19980201.nc : there is no dimension called longitude inside this file
+sfcWind_19980201.nc : there is no dimension called ensemble inside this file
+sfcWind_19980201.nc : time dimension has 10812 values instead of 216
+sfcWind_19980301.nc : there is no dimension called ensemble inside this file
+sfcWind_19980401.nc : there is no dimension called ensemble inside this file
+sfcWind_19980501.nc : >>>>> Missing file <<<<<
+sfcWind_19980601.nc : there is no dimension called ensemble inside this file
+sfcWind_19980701.nc : there is no dimension called ensemble inside this file
+sfcWind_19980801.nc : >>>>> Missing file <<<<<
+sfcWind_19980901.nc : there is no dimension called ensemble inside this file
+sfcWind_19981001.nc : there is no dimension called ensemble inside this file
+sfcWind_19981101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19981101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19981101.nc : there is no dimension called ensemble inside this file
+sfcWind_19981101.nc : time dimension has 214 values instead of 216
+sfcWind_19981201.nc : there is no dimension called ensemble inside this file
+sfcWind_19990101.nc : there is no dimension called ensemble inside this file
+sfcWind_19990201.nc : there is no dimension called latitude inside this file
+sfcWind_19990201.nc : there is no dimension called longitude inside this file
+sfcWind_19990201.nc : there is no dimension called ensemble inside this file
+sfcWind_19990201.nc : time dimension has 10812 values instead of 216
+sfcWind_19990301.nc : there is no dimension called ensemble inside this file
+sfcWind_19990401.nc : there is no dimension called ensemble inside this file
+sfcWind_19990501.nc : >>>>> Missing file <<<<<
+sfcWind_19990601.nc : there is no dimension called ensemble inside this file
+sfcWind_19990701.nc : there is no dimension called ensemble inside this file
+sfcWind_19990801.nc : >>>>> Missing file <<<<<
+sfcWind_19990901.nc : there is no dimension called ensemble inside this file
+sfcWind_19991001.nc : there is no dimension called ensemble inside this file
+sfcWind_19991101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19991101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19991101.nc : there is no dimension called ensemble inside this file
+sfcWind_19991101.nc : there is no dimension called time inside this file
+sfcWind_19991201.nc : there is no dimension called ensemble inside this file
+sfcWind_20000101.nc : there is no dimension called ensemble inside this file
+sfcWind_20000201.nc : there is no dimension called ensemble inside this file
+sfcWind_20000201.nc : time dimension has 10863 values instead of 216
+sfcWind_20000301.nc : there is no dimension called ensemble inside this file
+sfcWind_20000301.nc : time dimension has 55 values instead of 216
+sfcWind_20000401.nc : there is no dimension called ensemble inside this file
+sfcWind_20000401.nc : time dimension has 54 values instead of 216
+sfcWind_20000501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20000501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20000501.nc : there is no dimension called ensemble inside this file
+sfcWind_20000501.nc : there is no dimension called time inside this file
+sfcWind_20000601.nc : there is no dimension called ensemble inside this file
+sfcWind_20000701.nc : there is no dimension called ensemble inside this file
+sfcWind_20000801.nc : there is no dimension called latitude inside this file
+sfcWind_20000801.nc : there is no dimension called longitude inside this file
+sfcWind_20000801.nc : there is no dimension called ensemble inside this file
+sfcWind_20000801.nc : time dimension has 212 values instead of 216
+sfcWind_20000901.nc : there is no dimension called ensemble inside this file
+sfcWind_20001001.nc : there is no dimension called ensemble inside this file
+sfcWind_20001101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20001101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20001101.nc : there is no dimension called ensemble inside this file
+sfcWind_20001101.nc : there is no dimension called time inside this file
+sfcWind_20001201.nc : there is no dimension called ensemble inside this file
+sfcWind_20010101.nc : there is no dimension called ensemble inside this file
+sfcWind_20010201.nc : there is no dimension called latitude inside this file
+sfcWind_20010201.nc : there is no dimension called longitude inside this file
+sfcWind_20010201.nc : there is no dimension called ensemble inside this file
+sfcWind_20010201.nc : time dimension has 10812 values instead of 216
+sfcWind_20010301.nc : there is no dimension called ensemble inside this file
+sfcWind_20010401.nc : there is no dimension called ensemble inside this file
+sfcWind_20010501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20010501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20010501.nc : there is no dimension called ensemble inside this file
+sfcWind_20010501.nc : there is no dimension called time inside this file
+sfcWind_20010601.nc : there is no dimension called ensemble inside this file
+sfcWind_20010701.nc : there is no dimension called ensemble inside this file
+sfcWind_20010801.nc : there is no dimension called latitude inside this file
+sfcWind_20010801.nc : there is no dimension called longitude inside this file
+sfcWind_20010801.nc : there is no dimension called ensemble inside this file
+sfcWind_20010801.nc : time dimension has 212 values instead of 216
+sfcWind_20010901.nc : there is no dimension called ensemble inside this file
+sfcWind_20011001.nc : there is no dimension called ensemble inside this file
+sfcWind_20011101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20011101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20011101.nc : there is no dimension called ensemble inside this file
+sfcWind_20011101.nc : there is no dimension called time inside this file
+sfcWind_20011201.nc : there is no dimension called ensemble inside this file
+sfcWind_20020101.nc : there is no dimension called ensemble inside this file
+sfcWind_20020201.nc : there is no dimension called latitude inside this file
+sfcWind_20020201.nc : there is no dimension called longitude inside this file
+sfcWind_20020201.nc : there is no dimension called ensemble inside this file
+sfcWind_20020201.nc : time dimension has 10812 values instead of 216
+sfcWind_20020301.nc : there is no dimension called ensemble inside this file
+sfcWind_20020401.nc : there is no dimension called ensemble inside this file
+sfcWind_20020501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20020501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20020501.nc : there is no dimension called ensemble inside this file
+sfcWind_20020501.nc : there is no dimension called time inside this file
+sfcWind_20020601.nc : there is no dimension called ensemble inside this file
+sfcWind_20020701.nc : there is no dimension called ensemble inside this file
+sfcWind_20020801.nc : there is no dimension called latitude inside this file
+sfcWind_20020801.nc : there is no dimension called longitude inside this file
+sfcWind_20020801.nc : there is no dimension called ensemble inside this file
+sfcWind_20020801.nc : time dimension has 212 values instead of 216
+sfcWind_20020901.nc : there is no dimension called ensemble inside this file
+sfcWind_20021001.nc : there is no dimension called ensemble inside this file
+sfcWind_20021101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20021101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20021101.nc : there is no dimension called ensemble inside this file
+sfcWind_20021101.nc : there is no dimension called time inside this file
+sfcWind_20021201.nc : there is no dimension called ensemble inside this file
+sfcWind_20030101.nc : there is no dimension called ensemble inside this file
+sfcWind_20030201.nc : there is no dimension called latitude inside this file
+sfcWind_20030201.nc : there is no dimension called longitude inside this file
+sfcWind_20030201.nc : there is no dimension called ensemble inside this file
+sfcWind_20030201.nc : time dimension has 10812 values instead of 216
+sfcWind_20030301.nc : there is no dimension called ensemble inside this file
+sfcWind_20030401.nc : there is no dimension called ensemble inside this file
+sfcWind_20030501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20030501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20030501.nc : there is no dimension called ensemble inside this file
+sfcWind_20030501.nc : there is no dimension called time inside this file
+sfcWind_20030601.nc : there is no dimension called ensemble inside this file
+sfcWind_20030701.nc : there is no dimension called ensemble inside this file
+sfcWind_20030801.nc : there is no dimension called latitude inside this file
+sfcWind_20030801.nc : there is no dimension called longitude inside this file
+sfcWind_20030801.nc : there is no dimension called ensemble inside this file
+sfcWind_20030801.nc : time dimension has 213 values instead of 216
+sfcWind_20030901.nc : there is no dimension called ensemble inside this file
+sfcWind_20031001.nc : there is no dimension called ensemble inside this file
+sfcWind_20031101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20031101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20031101.nc : there is no dimension called ensemble inside this file
+sfcWind_20031101.nc : there is no dimension called time inside this file
+sfcWind_20031201.nc : there is no dimension called ensemble inside this file
+sfcWind_20040101.nc : there is no dimension called ensemble inside this file
+sfcWind_20040201.nc : there is no dimension called latitude inside this file
+sfcWind_20040201.nc : there is no dimension called longitude inside this file
+sfcWind_20040201.nc : there is no dimension called ensemble inside this file
+sfcWind_20040201.nc : time dimension has 10863 values instead of 216
+sfcWind_20040301.nc : there is no dimension called ensemble inside this file
+sfcWind_20040401.nc : there is no dimension called ensemble inside this file
+sfcWind_20040501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20040501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20040501.nc : there is no dimension called ensemble inside this file
+sfcWind_20040501.nc : there is no dimension called time inside this file
+sfcWind_20040601.nc : there is no dimension called ensemble inside this file
+sfcWind_20040701.nc : there is no dimension called ensemble inside this file
+sfcWind_20040801.nc : there is no dimension called latitude inside this file
+sfcWind_20040801.nc : there is no dimension called longitude inside this file
+sfcWind_20040801.nc : there is no dimension called ensemble inside this file
+sfcWind_20040801.nc : time dimension has 212 values instead of 216
+sfcWind_20040901.nc : there is no dimension called ensemble inside this file
+sfcWind_20041001.nc : there is no dimension called ensemble inside this file
+sfcWind_20041101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20041101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20041101.nc : there is no dimension called ensemble inside this file
+sfcWind_20041101.nc : there is no dimension called time inside this file
+sfcWind_20041201.nc : there is no dimension called ensemble inside this file
+sfcWind_20050101.nc : there is no dimension called ensemble inside this file
+sfcWind_20050201.nc : there is no dimension called latitude inside this file
+sfcWind_20050201.nc : there is no dimension called longitude inside this file
+sfcWind_20050201.nc : there is no dimension called ensemble inside this file
+sfcWind_20050201.nc : time dimension has 10812 values instead of 216
+sfcWind_20050301.nc : there is no dimension called ensemble inside this file
+sfcWind_20050401.nc : there is no dimension called ensemble inside this file
+sfcWind_20050501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20050501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20050501.nc : there is no dimension called ensemble inside this file
+sfcWind_20050501.nc : there is no dimension called time inside this file
+sfcWind_20050601.nc : there is no dimension called ensemble inside this file
+sfcWind_20050701.nc : there is no dimension called ensemble inside this file
+sfcWind_20050801.nc : there is no dimension called latitude inside this file
+sfcWind_20050801.nc : there is no dimension called longitude inside this file
+sfcWind_20050801.nc : there is no dimension called ensemble inside this file
+sfcWind_20050801.nc : time dimension has 212 values instead of 216
+sfcWind_20050901.nc : there is no dimension called ensemble inside this file
+sfcWind_20051001.nc : there is no dimension called ensemble inside this file
+sfcWind_20051101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20051101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20051101.nc : there is no dimension called ensemble inside this file
+sfcWind_20051101.nc : there is no dimension called time inside this file
+sfcWind_20051201.nc : there is no dimension called ensemble inside this file
+sfcWind_20060101.nc : there is no dimension called ensemble inside this file
+sfcWind_20060201.nc : there is no dimension called latitude inside this file
+sfcWind_20060201.nc : there is no dimension called longitude inside this file
+sfcWind_20060201.nc : there is no dimension called ensemble inside this file
+sfcWind_20060201.nc : time dimension has 10812 values instead of 216
+sfcWind_20060301.nc : there is no dimension called ensemble inside this file
+sfcWind_20060401.nc : there is no dimension called ensemble inside this file
+sfcWind_20060501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20060501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20060501.nc : there is no dimension called ensemble inside this file
+sfcWind_20060501.nc : there is no dimension called time inside this file
+sfcWind_20060601.nc : there is no dimension called ensemble inside this file
+sfcWind_20060701.nc : there is no dimension called ensemble inside this file
+sfcWind_20060801.nc : there is no dimension called latitude inside this file
+sfcWind_20060801.nc : there is no dimension called longitude inside this file
+sfcWind_20060801.nc : there is no dimension called ensemble inside this file
+sfcWind_20060801.nc : time dimension has 212 values instead of 216
+sfcWind_20060901.nc : there is no dimension called ensemble inside this file
+sfcWind_20061001.nc : there is no dimension called ensemble inside this file
+sfcWind_20061101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20061101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20061101.nc : there is no dimension called ensemble inside this file
+sfcWind_20061101.nc : there is no dimension called time inside this file
+sfcWind_20061201.nc : there is no dimension called ensemble inside this file
+sfcWind_20070101.nc : there is no dimension called ensemble inside this file
+sfcWind_20070201.nc : there is no dimension called latitude inside this file
+sfcWind_20070201.nc : there is no dimension called longitude inside this file
+sfcWind_20070201.nc : there is no dimension called ensemble inside this file
+sfcWind_20070201.nc : time dimension has 10812 values instead of 216
+sfcWind_20070301.nc : there is no dimension called ensemble inside this file
+sfcWind_20070401.nc : there is no dimension called ensemble inside this file
+sfcWind_20070501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20070501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20070501.nc : there is no dimension called ensemble inside this file
+sfcWind_20070501.nc : there is no dimension called time inside this file
+sfcWind_20070601.nc : there is no dimension called ensemble inside this file
+sfcWind_20070701.nc : there is no dimension called ensemble inside this file
+sfcWind_20070801.nc : there is no dimension called latitude inside this file
+sfcWind_20070801.nc : there is no dimension called longitude inside this file
+sfcWind_20070801.nc : there is no dimension called ensemble inside this file
+sfcWind_20070801.nc : time dimension has 213 values instead of 216
+sfcWind_20070901.nc : there is no dimension called ensemble inside this file
+sfcWind_20071001.nc : there is no dimension called ensemble inside this file
+sfcWind_20071101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20071101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20071101.nc : there is no dimension called ensemble inside this file
+sfcWind_20071101.nc : there is no dimension called time inside this file
+sfcWind_20071201.nc : there is no dimension called ensemble inside this file
+sfcWind_20080101.nc : there is no dimension called ensemble inside this file
+sfcWind_20080201.nc : there is no dimension called latitude inside this file
+sfcWind_20080201.nc : there is no dimension called longitude inside this file
+sfcWind_20080201.nc : there is no dimension called ensemble inside this file
+sfcWind_20080201.nc : time dimension has 10863 values instead of 216
+sfcWind_20080301.nc : there is no dimension called ensemble inside this file
+sfcWind_20080301.nc : time dimension has 2 values instead of 216
+sfcWind_20080401.nc : there is no dimension called ensemble inside this file
+sfcWind_20080501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20080501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20080501.nc : there is no dimension called ensemble inside this file
+sfcWind_20080501.nc : there is no dimension called time inside this file
+sfcWind_20080601.nc : there is no dimension called ensemble inside this file
+sfcWind_20080701.nc : there is no dimension called ensemble inside this file
+sfcWind_20080801.nc : there is no dimension called latitude inside this file
+sfcWind_20080801.nc : there is no dimension called longitude inside this file
+sfcWind_20080801.nc : there is no dimension called ensemble inside this file
+sfcWind_20080801.nc : time dimension has 212 values instead of 216
+sfcWind_20080901.nc : there is no dimension called ensemble inside this file
+sfcWind_20081001.nc : there is no dimension called ensemble inside this file
+sfcWind_20081101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20081101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20081101.nc : there is no dimension called ensemble inside this file
+sfcWind_20081101.nc : there is no dimension called time inside this file
+sfcWind_20081201.nc : there is no dimension called ensemble inside this file
+sfcWind_20090101.nc : there is no dimension called ensemble inside this file
+sfcWind_20090201.nc : there is no dimension called latitude inside this file
+sfcWind_20090201.nc : there is no dimension called longitude inside this file
+sfcWind_20090201.nc : there is no dimension called ensemble inside this file
+sfcWind_20090201.nc : time dimension has 10812 values instead of 216
+sfcWind_20090301.nc : there is no dimension called ensemble inside this file
+sfcWind_20090401.nc : there is no dimension called ensemble inside this file
+sfcWind_20090501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20090501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20090501.nc : there is no dimension called ensemble inside this file
+sfcWind_20090501.nc : there is no dimension called time inside this file
+sfcWind_20090601.nc : there is no dimension called ensemble inside this file
+sfcWind_20090701.nc : there is no dimension called ensemble inside this file
+sfcWind_20090801.nc : there is no dimension called latitude inside this file
+sfcWind_20090801.nc : there is no dimension called longitude inside this file
+sfcWind_20090801.nc : there is no dimension called ensemble inside this file
+sfcWind_20090801.nc : time dimension has 212 values instead of 216
+sfcWind_20090901.nc : there is no dimension called ensemble inside this file
+sfcWind_20091001.nc : there is no dimension called ensemble inside this file
+sfcWind_20091101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20091101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20091101.nc : there is no dimension called ensemble inside this file
+sfcWind_20091101.nc : there is no dimension called time inside this file
+sfcWind_20091201.nc : there is no dimension called ensemble inside this file
+sfcWind_20100101.nc : there is no dimension called ensemble inside this file
+sfcWind_20100201.nc : there is no dimension called latitude inside this file
+sfcWind_20100201.nc : there is no dimension called longitude inside this file
+sfcWind_20100201.nc : there is no dimension called ensemble inside this file
+sfcWind_20100201.nc : time dimension has 10812 values instead of 216
+sfcWind_20100301.nc : there is no dimension called ensemble inside this file
+sfcWind_20100401.nc : there is no dimension called ensemble inside this file
+sfcWind_20100501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20100501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20100501.nc : there is no dimension called ensemble inside this file
+sfcWind_20100501.nc : there is no dimension called time inside this file
+sfcWind_20100601.nc : there is no dimension called ensemble inside this file
+sfcWind_20100701.nc : there is no dimension called ensemble inside this file
+sfcWind_20100801.nc : there is no dimension called latitude inside this file
+sfcWind_20100801.nc : there is no dimension called longitude inside this file
+sfcWind_20100801.nc : there is no dimension called ensemble inside this file
+sfcWind_20100801.nc : time dimension has 212 values instead of 216
+sfcWind_20100901.nc : there is no dimension called ensemble inside this file
+sfcWind_20100901.nc : time dimension has 110 values instead of 216
+sfcWind_20101001.nc : there is no dimension called ensemble inside this file
+sfcWind_20101101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20101101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20101101.nc : there is no dimension called ensemble inside this file
+sfcWind_20101101.nc : there is no dimension called time inside this file
+sfcWind_20101201.nc : there is no dimension called ensemble inside this file
+sfcWind_20101201.nc : time dimension has 160 values instead of 216
+sfcWind_20110101.nc : there is no dimension called ensemble inside this file
+sfcWind_20110201.nc : there is no dimension called latitude inside this file
+sfcWind_20110201.nc : there is no dimension called longitude inside this file
+sfcWind_20110201.nc : there is no dimension called ensemble inside this file
+sfcWind_20110201.nc : time dimension has 10812 values instead of 216
+sfcWind_20110301.nc : there is no dimension called ensemble inside this file
+sfcWind_20110401.nc : there is no dimension called ensemble inside this file
+sfcWind_20110501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20110501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20110501.nc : there is no dimension called ensemble inside this file
+sfcWind_20110501.nc : there is no dimension called time inside this file
+sfcWind_20110601.nc : there is no dimension called ensemble inside this file
+sfcWind_20110701.nc : there is no dimension called ensemble inside this file
+sfcWind_20110801.nc : there is no dimension called latitude inside this file
+sfcWind_20110801.nc : there is no dimension called longitude inside this file
+sfcWind_20110801.nc : there is no dimension called ensemble inside this file
+sfcWind_20110801.nc : time dimension has 213 values instead of 216
+sfcWind_20110901.nc : there is no dimension called ensemble inside this file
+sfcWind_20111001.nc : there is no dimension called ensemble inside this file
+sfcWind_20111101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20111101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20111101.nc : there is no dimension called ensemble inside this file
+sfcWind_20111101.nc : there is no dimension called time inside this file
+sfcWind_20111201.nc : there is no dimension called ensemble inside this file
+sfcWind_20120101.nc : there is no dimension called ensemble inside this file
+sfcWind_20120201.nc : there is no dimension called latitude inside this file
+sfcWind_20120201.nc : there is no dimension called longitude inside this file
+sfcWind_20120201.nc : there is no dimension called ensemble inside this file
+sfcWind_20120201.nc : time dimension has 10863 values instead of 216
+sfcWind_20120301.nc : there is no dimension called ensemble inside this file
+sfcWind_20120401.nc : there is no dimension called ensemble inside this file
+sfcWind_20120401.nc : time dimension has 180 values instead of 216
+sfcWind_20120501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20120501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20120501.nc : there is no dimension called ensemble inside this file
+sfcWind_20120501.nc : there is no dimension called time inside this file
+sfcWind_20120601.nc : there is no dimension called ensemble inside this file
+sfcWind_20120601.nc : time dimension has 97 values instead of 216
+sfcWind_20120701.nc : there is no dimension called ensemble inside this file
+sfcWind_20120801.nc : there is no dimension called latitude inside this file
+sfcWind_20120801.nc : there is no dimension called longitude inside this file
+sfcWind_20120801.nc : there is no dimension called ensemble inside this file
+sfcWind_20120801.nc : time dimension has 212 values instead of 216
+sfcWind_20120901.nc : there is no dimension called ensemble inside this file
+sfcWind_20121001.nc : >>>>> Missing file <<<<<
+sfcWind_20121101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20121101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20121101.nc : there is no dimension called ensemble inside this file
+sfcWind_20121101.nc : there is no dimension called time inside this file
+sfcWind_20121201.nc : >>>>> Missing file <<<<<
+sfcWind_20130101.nc : >>>>> Missing file <<<<<
+sfcWind_20130201.nc : there is no dimension called latitude inside this file
+sfcWind_20130201.nc : there is no dimension called longitude inside this file
+sfcWind_20130201.nc : there is no dimension called ensemble inside this file
+sfcWind_20130201.nc : time dimension has 11016 values instead of 216
+sfcWind_20130301.nc : >>>>> Missing file <<<<<
+sfcWind_20130401.nc : >>>>> Missing file <<<<<
+sfcWind_20130501.nc : there is no dimension called latitude inside this file
+sfcWind_20130501.nc : there is no dimension called longitude inside this file
+sfcWind_20130501.nc : there is no dimension called ensemble inside this file
+sfcWind_20130501.nc : time dimension has 11016 values instead of 216
+sfcWind_20130601.nc : >>>>> Missing file <<<<<
+sfcWind_20130701.nc : >>>>> Missing file <<<<<
+sfcWind_20130801.nc : there is no dimension called latitude inside this file
+sfcWind_20130801.nc : there is no dimension called longitude inside this file
+sfcWind_20130801.nc : there is no dimension called ensemble inside this file
+sfcWind_20130801.nc : time dimension has 212 values instead of 216
+sfcWind_20130901.nc : there is no dimension called ensemble inside this file
+sfcWind_20131001.nc : there is no dimension called ensemble inside this file
+sfcWind_20131101.nc : there is no dimension called latitude inside this file
+sfcWind_20131101.nc : there is no dimension called longitude inside this file
+sfcWind_20131101.nc : there is no dimension called ensemble inside this file
+sfcWind_20131101.nc : time dimension has 212 values instead of 216
+sfcWind_20131201.nc : there is no dimension called ensemble inside this file
+sfcWind_20140101.nc : >>>>> Missing file <<<<<
+sfcWind_20140201.nc : there is no dimension called latitude inside this file
+sfcWind_20140201.nc : there is no dimension called longitude inside this file
+sfcWind_20140201.nc : there is no dimension called ensemble inside this file
+sfcWind_20140201.nc : time dimension has 11016 values instead of 216
+sfcWind_20140301.nc : >>>>> Missing file <<<<<
+sfcWind_20140401.nc : >>>>> Missing file <<<<<
+sfcWind_20140501.nc : there is no dimension called latitude inside this file
+sfcWind_20140501.nc : there is no dimension called longitude inside this file
+sfcWind_20140501.nc : there is no dimension called ensemble inside this file
+sfcWind_20140501.nc : time dimension has 214 values instead of 216
+sfcWind_20140601.nc : >>>>> Missing file <<<<<
+sfcWind_20140701.nc : >>>>> Missing file <<<<<
+sfcWind_20140801.nc : there is no dimension called ensemble inside this file
+sfcWind_20140901.nc : there is no dimension called ensemble inside this file
+sfcWind_20141001.nc : there is no dimension called ensemble inside this file
+sfcWind_20141101.nc : there is no dimension called latitude inside this file
+sfcWind_20141101.nc : there is no dimension called longitude inside this file
+sfcWind_20141101.nc : there is no dimension called ensemble inside this file
+sfcWind_20141101.nc : time dimension has 212 values instead of 216
+sfcWind_20141201.nc : there is no dimension called ensemble inside this file
+sfcWind_20150101.nc : >>>>> Missing file <<<<<
+sfcWind_20150201.nc : >>>>> Missing file <<<<<
+sfcWind_20150301.nc : >>>>> Missing file <<<<<
+sfcWind_20150401.nc : >>>>> Missing file <<<<<
+sfcWind_20150501.nc : >>>>> Missing file <<<<<
+sfcWind_20150601.nc : there is no dimension called ensemble inside this file
+sfcWind_20150701.nc : there is no dimension called ensemble inside this file
+sfcWind_20150801.nc : there is no dimension called ensemble inside this file
+sfcWind_20150801.nc : time dimension has 197 values instead of 216
+sfcWind_20150901.nc : >>>>> Missing file <<<<<
+sfcWind_20151001.nc : >>>>> Missing file <<<<<
+sfcWind_20151101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20151101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20151101.nc : there is no dimension called ensemble inside this file
+sfcWind_20151101.nc : time dimension has 236 values instead of 216
+sfcWind_20151201.nc : >>>>> Missing file <<<<<
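# Note: the checking script that generated the two listings in this commit is not part of
# this diff. The snippet below is only a minimal sketch of how such per-file dimension
# checks could be reproduced with the 'ncdf4' package; the function name check_nc_file
# and the 'expected' defaults (taken from the "Expected elements" header of the listing
# that follows, with 51 as the alternative ensemble size for some start dates) are
# illustrative assumptions, not the original implementation.

library(ncdf4)

check_nc_file <- function(filename,
                          expected = c(latitude = 181, longitude = 360,
                                       ensemble = 15, time = 216)) {
  # absent files are reported like the ">>>>> Missing file <<<<<" lines in the listings:
  if (!file.exists(filename)) {
    cat(basename(filename), ": >>>>> Missing file <<<<<\n")
    return(invisible(NULL))
  }
  nc <- nc_open(filename)
  for (d in names(expected)) {
    if (!(d %in% names(nc$dim))) {
      # dimension missing entirely:
      cat(basename(filename), ": there is no dimension called", d, "inside this file\n")
    } else if (nc$dim[[d]]$len != expected[[d]]) {
      # dimension present but with an unexpected length:
      cat(basename(filename), ":", d, "dimension has", nc$dim[[d]]$len,
          "values instead of", expected[[d]], "\n")
    }
  }
  nc_close(nc)
}

# e.g., to scan the whole archive checked below:
# for (f in list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h",
#                      pattern = "\\.nc$", full.names = TRUE)) check_nc_file(f)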
diff --git a/bash/check_ECMWFS4_sfcWind_daily.txt b/bash/check_ECMWFS4_sfcWind_daily.txt
new file mode 100644
index 0000000000000000000000000000000000000000..18c7f82bd8eb4dfa165c22a5ea9cf16b26b06625
--- /dev/null
+++ b/bash/check_ECMWFS4_sfcWind_daily.txt
@@ -0,0 +1,731 @@
+Checked variable: sfcWind
+Path: /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/
+Extension: .nc
+Checked period: 1981-2015
+>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<<
+Latitude denomination: latitude
+Number of latitude values: 181
+Longitude denomination: longitude
+Number of longitude values: 360
+Ensemble denomination: ensemble
+Number of ensemble values: 15
+Alternative number of ensemble values: 51
+Lead-time denomination: time
+Number of lead-times: 216
+ >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<<
+sfcWind_19810101.nc : ok
+sfcWind_19810201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19810201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19810201.nc : time dimension has 626 values instead of 216
+sfcWind_19810301.nc : ok
+sfcWind_19810401.nc : ok
+sfcWind_19810501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19810501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19810501.nc : time dimension has 214 values instead of 216
+sfcWind_19810601.nc : ok
+sfcWind_19810701.nc : ok
+sfcWind_19810801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19810801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19810801.nc : time dimension has 212 values instead of 216
+sfcWind_19810901.nc : ok
+sfcWind_19811001.nc : ok
+sfcWind_19811101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19811101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19811101.nc : time dimension has 214 values instead of 216
+sfcWind_19811201.nc : ok
+sfcWind_19820101.nc : ok
+sfcWind_19820201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19820201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19820201.nc : time dimension has 848 values instead of 216
+sfcWind_19820301.nc : ok
+sfcWind_19820401.nc : ok
+sfcWind_19820501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19820501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19820501.nc : time dimension has 214 values instead of 216
+sfcWind_19820601.nc : ok
+sfcWind_19820701.nc : time dimension has 31 values instead of 216
+sfcWind_19820801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19820801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19820801.nc : time dimension has 212 values instead of 216
+sfcWind_19820901.nc : ok
+sfcWind_19821001.nc : ok
+sfcWind_19821101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19821101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19821101.nc : time dimension has 214 values instead of 216
+sfcWind_19821201.nc : ok
+sfcWind_19830101.nc : ok
+sfcWind_19830201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830201.nc : time dimension has 848 values instead of 216
+sfcWind_19830301.nc : ok
+sfcWind_19830401.nc : ok
+sfcWind_19830501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830501.nc : time dimension has 214 values instead of 216
+sfcWind_19830601.nc : ok
+sfcWind_19830701.nc : ok
+sfcWind_19830801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830801.nc : time dimension has 213 values instead of 216
+sfcWind_19830901.nc : ok
+sfcWind_19831001.nc : ok
+sfcWind_19831101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19831101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19831101.nc : time dimension has 734 values instead of 216
+sfcWind_19831201.nc : ok
+sfcWind_19840101.nc : time dimension has 9 values instead of 216
+sfcWind_19840201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840201.nc : time dimension has 852 values instead of 216
+sfcWind_19840301.nc : ok
+sfcWind_19840401.nc : ok
+sfcWind_19840501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840501.nc : time dimension has 214 values instead of 216
+sfcWind_19840601.nc : ok
+sfcWind_19840701.nc : ok
+sfcWind_19840801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840801.nc : time dimension has 212 values instead of 216
+sfcWind_19840901.nc : ok
+sfcWind_19841001.nc : ok
+sfcWind_19841101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19841101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19841101.nc : time dimension has 214 values instead of 216
+sfcWind_19841201.nc : ok
+sfcWind_19850101.nc : ok
+sfcWind_19850201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19850201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19850201.nc : there is no dimension called ensemble inside this file
+sfcWind_19850201.nc : time dimension has 14625 values instead of 216
+sfcWind_19850301.nc : ok
+sfcWind_19850401.nc : ok
+sfcWind_19850501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19850501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19850501.nc : time dimension has 214 values instead of 216
+sfcWind_19850601.nc : there is no dimension called latitude inside this file
+sfcWind_19850601.nc : there is no dimension called longitude inside this file
+sfcWind_19850601.nc : there is no dimension called ensemble inside this file
+sfcWind_19850601.nc : there is no dimension called time inside this file
+sfcWind_19850701.nc : ok
+sfcWind_19850801.nc : there is no dimension called latitude inside this file
+sfcWind_19850801.nc : there is no dimension called longitude inside this file
+sfcWind_19850801.nc : there is no dimension called ensemble inside this file
+sfcWind_19850801.nc : time dimension has 1 values instead of 216
+sfcWind_19850901.nc : time dimension has 226 values instead of 216
+sfcWind_19851001.nc : ok
+sfcWind_19851101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19851101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19851101.nc : time dimension has 214 values instead of 216
+sfcWind_19851201.nc : ok
+sfcWind_19860101.nc : time dimension has 23 values instead of 216
+sfcWind_19860201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19860201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19860201.nc : time dimension has 848 values instead of 216
+sfcWind_19860301.nc : time dimension has 285 values instead of 216
+sfcWind_19860401.nc : ok
+sfcWind_19860501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19860501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19860501.nc : time dimension has 214 values instead of 216
+sfcWind_19860601.nc : ok
+sfcWind_19860701.nc : ok
+sfcWind_19860801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19860801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19860801.nc : time dimension has 212 values instead of 216
+sfcWind_19860901.nc : ok
+sfcWind_19861001.nc : time dimension has 94 values instead of 216
+sfcWind_19861101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19861101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19861101.nc : time dimension has 214 values instead of 216
+sfcWind_19861201.nc : ok
+sfcWind_19870101.nc : ok
+sfcWind_19870201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19870201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19870201.nc : there is no dimension called ensemble inside this file
+sfcWind_19870201.nc : time dimension has 10812 values instead of 216
+sfcWind_19870301.nc : ok
+sfcWind_19870401.nc : ok
+sfcWind_19870501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19870501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19870501.nc : time dimension has 214 values instead of 216
+sfcWind_19870601.nc : ok
+sfcWind_19870701.nc : ok
+sfcWind_19870801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19870801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19870801.nc : time dimension has 213 values instead of 216
+sfcWind_19870901.nc : ok
+sfcWind_19871001.nc : ok
+sfcWind_19871101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19871101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19871101.nc : time dimension has 214 values instead of 216
+sfcWind_19871201.nc : time dimension has 46 values instead of 216
+sfcWind_19880101.nc : ok
+sfcWind_19880201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19880201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19880201.nc : time dimension has 852 values instead of 216
+sfcWind_19880301.nc : ok
+sfcWind_19880401.nc : ok
+sfcWind_19880501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19880501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19880501.nc : time dimension has 214 values instead of 216
+sfcWind_19880601.nc : ok
+sfcWind_19880701.nc : ok
+sfcWind_19880801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19880801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19880801.nc : time dimension has 212 values instead of 216
+sfcWind_19880901.nc : ok
+sfcWind_19881001.nc : ok
+sfcWind_19881101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19881101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19881101.nc : time dimension has 214 values instead of 216
+sfcWind_19881201.nc : ok
+sfcWind_19890101.nc : ok
+sfcWind_19890201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19890201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19890201.nc : there is no dimension called ensemble inside this file
+sfcWind_19890201.nc : time dimension has 10812 values instead of 216
+sfcWind_19890301.nc : ok
+sfcWind_19890401.nc : ok
+sfcWind_19890501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19890501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19890501.nc : time dimension has 214 values instead of 216
+sfcWind_19890601.nc : ok
+sfcWind_19890701.nc : ok
+sfcWind_19890801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19890801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19890801.nc : time dimension has 720 values instead of 216
+sfcWind_19890901.nc : ok
+sfcWind_19891001.nc : ok
+sfcWind_19891101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19891101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19891101.nc : time dimension has 214 values instead of 216
+sfcWind_19891201.nc : ok
+sfcWind_19900101.nc : ok
+sfcWind_19900201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19900201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19900201.nc : time dimension has 848 values instead of 216
+sfcWind_19900301.nc : ok
+sfcWind_19900401.nc : >>>>> Missing file <<<<<
+sfcWind_19900501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19900501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19900501.nc : time dimension has 214 values instead of 216
+sfcWind_19900601.nc : ok
+sfcWind_19900701.nc : >>>>> Missing file <<<<<
+sfcWind_19900801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19900801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19900801.nc : time dimension has 212 values instead of 216
+sfcWind_19900901.nc : time dimension has 2 values instead of 216
+sfcWind_19901001.nc : time dimension has 120 values instead of 216
+sfcWind_19901101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19901101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19901101.nc : time dimension has 214 values instead of 216
+sfcWind_19901201.nc : ok
+sfcWind_19910101.nc : ok
+sfcWind_19910201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19910201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19910201.nc : there is no dimension called ensemble inside this file
+sfcWind_19910201.nc : time dimension has 10812 values instead of 216
+sfcWind_19910301.nc : ok
+sfcWind_19910401.nc : ok
+sfcWind_19910501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19910501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19910501.nc : time dimension has 214 values instead of 216
+sfcWind_19910601.nc : ok
+sfcWind_19910701.nc : ok
+sfcWind_19910801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19910801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19910801.nc : time dimension has 213 values instead of 216
+sfcWind_19910901.nc : ok
+sfcWind_19911001.nc : time dimension has 117 values instead of 216
+sfcWind_19911101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19911101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19911101.nc : time dimension has 214 values instead of 216
+sfcWind_19911201.nc : ok
+sfcWind_19920101.nc : ok
+sfcWind_19920201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19920201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19920201.nc : time dimension has 852 values instead of 216
+sfcWind_19920301.nc : ok
+sfcWind_19920401.nc : ok
+sfcWind_19920501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19920501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19920501.nc : time dimension has 214 values instead of 216
+sfcWind_19920601.nc : time dimension has 188 values instead of 216
+sfcWind_19920701.nc : ok
+sfcWind_19920801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19920801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19920801.nc : time dimension has 212 values instead of 216
+sfcWind_19920901.nc : ok
+sfcWind_19921001.nc : ok
+sfcWind_19921101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19921101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19921101.nc : time dimension has 214 values instead of 216
+sfcWind_19921201.nc : ok
+sfcWind_19930101.nc : ok
+sfcWind_19930201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19930201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19930201.nc : there is no dimension called ensemble inside this file
+sfcWind_19930201.nc : time dimension has 10812 values instead of 216
+sfcWind_19930301.nc : ok
+sfcWind_19930401.nc : ok
+sfcWind_19930501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19930501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19930501.nc : time dimension has 214 values instead of 216
+sfcWind_19930601.nc : there is no dimension called latitude inside this file
+sfcWind_19930601.nc : there is no dimension called longitude inside this file
+sfcWind_19930601.nc : there is no dimension called ensemble inside this file
+sfcWind_19930601.nc : there is no dimension called time inside this file
+sfcWind_19930701.nc : there is no dimension called ensemble inside this file
+sfcWind_19930801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19930801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19930801.nc : time dimension has 212 values instead of 216
+sfcWind_19930901.nc : there is no dimension called ensemble inside this file
+sfcWind_19931001.nc : there is no dimension called ensemble inside this file
+sfcWind_19931101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19931101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19931101.nc : time dimension has 214 values instead of 216
+sfcWind_19931201.nc : time dimension has 29 values instead of 216
+sfcWind_19940101.nc : ok
+sfcWind_19940201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19940201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19940201.nc : time dimension has 848 values instead of 216
+sfcWind_19940301.nc : time dimension has 101 values instead of 216
+sfcWind_19940401.nc : ok
+sfcWind_19940501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19940501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19940501.nc : time dimension has 214 values instead of 216
+sfcWind_19940601.nc : ok
+sfcWind_19940701.nc : ok
+sfcWind_19940801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19940801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19940801.nc : time dimension has 212 values instead of 216
+sfcWind_19940901.nc : ok
+sfcWind_19941001.nc : ok
+sfcWind_19941101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19941101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19941101.nc : time dimension has 214 values instead of 216
+sfcWind_19941201.nc : ok
+sfcWind_19950101.nc : time dimension has 75 values instead of 216
+sfcWind_19950201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19950201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19950201.nc : there is no dimension called ensemble inside this file
+sfcWind_19950201.nc : time dimension has 10812 values instead of 216
+sfcWind_19950301.nc : ok
+sfcWind_19950401.nc : time dimension has 187 values instead of 216
+sfcWind_19950501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19950501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19950501.nc : time dimension has 214 values instead of 216
+sfcWind_19950601.nc : ok
+sfcWind_19950701.nc : ok
+sfcWind_19950801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19950801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19950801.nc : time dimension has 213 values instead of 216
+sfcWind_19950901.nc : ok
+sfcWind_19951001.nc : ok
+sfcWind_19951101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19951101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19951101.nc : time dimension has 214 values instead of 216
+sfcWind_19951201.nc : ok
+sfcWind_19960101.nc : ok
+sfcWind_19960201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19960201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19960201.nc : time dimension has 852 values instead of 216
+sfcWind_19960301.nc : ok
+sfcWind_19960401.nc : ok
+sfcWind_19960501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19960501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19960501.nc : time dimension has 214 values instead of 216
+sfcWind_19960601.nc : ok
+sfcWind_19960701.nc : ok
+sfcWind_19960801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19960801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19960801.nc : time dimension has 212 values instead of 216
+sfcWind_19960901.nc : ok
+sfcWind_19961001.nc : ok
+sfcWind_19961101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19961101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19961101.nc : time dimension has 214 values instead of 216
+sfcWind_19961201.nc : ok
+sfcWind_19970101.nc : ok
+sfcWind_19970201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19970201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19970201.nc : there is no dimension called ensemble inside this file
+sfcWind_19970201.nc : time dimension has 10812 values instead of 216
+sfcWind_19970301.nc : ok
+sfcWind_19970401.nc : ok
+sfcWind_19970501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19970501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19970501.nc : time dimension has 214 values instead of 216
+sfcWind_19970601.nc : ok
+sfcWind_19970701.nc : ok
+sfcWind_19970801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19970801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19970801.nc : time dimension has 407 values instead of 216
+sfcWind_19970901.nc : ok
+sfcWind_19971001.nc : ok
+sfcWind_19971101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19971101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19971101.nc : time dimension has 214 values instead of 216
+sfcWind_19971201.nc : ok
+sfcWind_19980101.nc : ok
+sfcWind_19980201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19980201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19980201.nc : time dimension has 848 values instead of 216
+sfcWind_19980301.nc : ok
+sfcWind_19980401.nc : ok
+sfcWind_19980501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19980501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19980501.nc : time dimension has 214 values instead of 216
+sfcWind_19980601.nc : ok
+sfcWind_19980701.nc : ok
+sfcWind_19980801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19980801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19980801.nc : time dimension has 212 values instead of 216
+sfcWind_19980901.nc : ok
+sfcWind_19981001.nc : ok
+sfcWind_19981101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19981101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19981101.nc : time dimension has 214 values instead of 216
+sfcWind_19981201.nc : ok
+sfcWind_19990101.nc : ok +sfcWind_19990201.nc : latitude dimension has 256 values instead of 181 +sfcWind_19990201.nc : longitude dimension has 512 values instead of 360 +sfcWind_19990201.nc : there is no dimension called ensemble inside this file +sfcWind_19990201.nc : time dimension has 10812 values instead of 216 +sfcWind_19990301.nc : ok +sfcWind_19990401.nc : ok +sfcWind_19990501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19990501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19990501.nc : time dimension has 214 values instead of 216 +sfcWind_19990601.nc : ok +sfcWind_19990701.nc : ok +sfcWind_19990801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19990801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19990801.nc : time dimension has 213 values instead of 216 +sfcWind_19990901.nc : ok +sfcWind_19991001.nc : ok +sfcWind_19991101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19991101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19991101.nc : time dimension has 243 values instead of 216 +sfcWind_19991201.nc : ok +sfcWind_20000101.nc : ok +sfcWind_20000201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20000201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20000201.nc : there is no dimension called ensemble inside this file +sfcWind_20000201.nc : time dimension has 852 values instead of 216 +sfcWind_20000301.nc : time dimension has 55 values instead of 216 +sfcWind_20000401.nc : time dimension has 54 values instead of 216 +sfcWind_20000501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20000501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20000501.nc : time dimension has 214 values instead of 216 +sfcWind_20000601.nc : ok +sfcWind_20000701.nc : ok +sfcWind_20000801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20000801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20000801.nc : time dimension has 212 values instead of 216 +sfcWind_20000901.nc : ok +sfcWind_20001001.nc : ok +sfcWind_20001101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20001101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20001101.nc : time dimension has 214 values instead of 216 +sfcWind_20001201.nc : ok +sfcWind_20010101.nc : ok +sfcWind_20010201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20010201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20010201.nc : there is no dimension called ensemble inside this file +sfcWind_20010201.nc : time dimension has 10812 values instead of 216 +sfcWind_20010301.nc : ok +sfcWind_20010401.nc : ok +sfcWind_20010501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20010501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20010501.nc : time dimension has 214 values instead of 216 +sfcWind_20010601.nc : ok +sfcWind_20010701.nc : ok +sfcWind_20010801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20010801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20010801.nc : time dimension has 212 values instead of 216 +sfcWind_20010901.nc : ok +sfcWind_20011001.nc : ok +sfcWind_20011101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20011101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20011101.nc : time dimension has 214 values instead of 216 +sfcWind_20011201.nc : ok +sfcWind_20020101.nc : ok +sfcWind_20020201.nc : 
latitude dimension has 256 values instead of 181 +sfcWind_20020201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20020201.nc : time dimension has 848 values instead of 216 +sfcWind_20020301.nc : ok +sfcWind_20020401.nc : ok +sfcWind_20020501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20020501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20020501.nc : time dimension has 214 values instead of 216 +sfcWind_20020601.nc : ok +sfcWind_20020701.nc : ok +sfcWind_20020801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20020801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20020801.nc : time dimension has 212 values instead of 216 +sfcWind_20020901.nc : ok +sfcWind_20021001.nc : ok +sfcWind_20021101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20021101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20021101.nc : time dimension has 214 values instead of 216 +sfcWind_20021201.nc : ok +sfcWind_20030101.nc : ok +sfcWind_20030201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20030201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20030201.nc : there is no dimension called ensemble inside this file +sfcWind_20030201.nc : time dimension has 10812 values instead of 216 +sfcWind_20030301.nc : ok +sfcWind_20030401.nc : ok +sfcWind_20030501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20030501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20030501.nc : time dimension has 214 values instead of 216 +sfcWind_20030601.nc : ok +sfcWind_20030701.nc : ok +sfcWind_20030801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20030801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20030801.nc : time dimension has 213 values instead of 216 +sfcWind_20030901.nc : ok +sfcWind_20031001.nc : ok +sfcWind_20031101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20031101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20031101.nc : time dimension has 214 values instead of 216 +sfcWind_20031201.nc : ok +sfcWind_20040101.nc : ok +sfcWind_20040201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20040201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20040201.nc : time dimension has 852 values instead of 216 +sfcWind_20040301.nc : ok +sfcWind_20040401.nc : ok +sfcWind_20040501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20040501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20040501.nc : time dimension has 214 values instead of 216 +sfcWind_20040601.nc : ok +sfcWind_20040701.nc : ok +sfcWind_20040801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20040801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20040801.nc : time dimension has 212 values instead of 216 +sfcWind_20040901.nc : ok +sfcWind_20041001.nc : ok +sfcWind_20041101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20041101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20041101.nc : time dimension has 214 values instead of 216 +sfcWind_20041201.nc : ok +sfcWind_20050101.nc : ok +sfcWind_20050201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20050201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20050201.nc : there is no dimension called ensemble inside this file +sfcWind_20050201.nc : time dimension has 10812 values instead of 216 +sfcWind_20050301.nc : 
ok +sfcWind_20050401.nc : ok +sfcWind_20050501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20050501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20050501.nc : time dimension has 214 values instead of 216 +sfcWind_20050601.nc : ok +sfcWind_20050701.nc : ok +sfcWind_20050801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20050801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20050801.nc : time dimension has 212 values instead of 216 +sfcWind_20050901.nc : ok +sfcWind_20051001.nc : ok +sfcWind_20051101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20051101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20051101.nc : time dimension has 214 values instead of 216 +sfcWind_20051201.nc : ok +sfcWind_20060101.nc : ok +sfcWind_20060201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20060201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20060201.nc : time dimension has 848 values instead of 216 +sfcWind_20060301.nc : ok +sfcWind_20060401.nc : ok +sfcWind_20060501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20060501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20060501.nc : time dimension has 214 values instead of 216 +sfcWind_20060601.nc : ok +sfcWind_20060701.nc : ok +sfcWind_20060801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20060801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20060801.nc : time dimension has 212 values instead of 216 +sfcWind_20060901.nc : ok +sfcWind_20061001.nc : ok +sfcWind_20061101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20061101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20061101.nc : time dimension has 214 values instead of 216 +sfcWind_20061201.nc : ok +sfcWind_20070101.nc : ok +sfcWind_20070201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20070201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20070201.nc : there is no dimension called ensemble inside this file +sfcWind_20070201.nc : time dimension has 10812 values instead of 216 +sfcWind_20070301.nc : ok +sfcWind_20070401.nc : ok +sfcWind_20070501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20070501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20070501.nc : time dimension has 214 values instead of 216 +sfcWind_20070601.nc : ok +sfcWind_20070701.nc : ok +sfcWind_20070801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20070801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20070801.nc : time dimension has 213 values instead of 216 +sfcWind_20070901.nc : ok +sfcWind_20071001.nc : ok +sfcWind_20071101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20071101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20071101.nc : time dimension has 214 values instead of 216 +sfcWind_20071201.nc : ok +sfcWind_20080101.nc : ok +sfcWind_20080201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20080201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20080201.nc : time dimension has 852 values instead of 216 +sfcWind_20080301.nc : time dimension has 2 values instead of 216 +sfcWind_20080401.nc : ok +sfcWind_20080501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20080501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20080501.nc : time dimension has 214 values instead of 216 
+sfcWind_20080601.nc : ok +sfcWind_20080701.nc : ok +sfcWind_20080801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20080801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20080801.nc : time dimension has 212 values instead of 216 +sfcWind_20080901.nc : ok +sfcWind_20081001.nc : ok +sfcWind_20081101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20081101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20081101.nc : time dimension has 214 values instead of 216 +sfcWind_20081201.nc : ok +sfcWind_20090101.nc : ok +sfcWind_20090201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20090201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20090201.nc : there is no dimension called ensemble inside this file +sfcWind_20090201.nc : time dimension has 10812 values instead of 216 +sfcWind_20090301.nc : ok +sfcWind_20090401.nc : ok +sfcWind_20090501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20090501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20090501.nc : time dimension has 214 values instead of 216 +sfcWind_20090601.nc : ok +sfcWind_20090701.nc : ok +sfcWind_20090801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20090801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20090801.nc : time dimension has 212 values instead of 216 +sfcWind_20090901.nc : ok +sfcWind_20091001.nc : ok +sfcWind_20091101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20091101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20091101.nc : time dimension has 214 values instead of 216 +sfcWind_20091201.nc : ok +sfcWind_20100101.nc : ok +sfcWind_20100201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20100201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20100201.nc : time dimension has 848 values instead of 216 +sfcWind_20100301.nc : ok +sfcWind_20100401.nc : ok +sfcWind_20100501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20100501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20100501.nc : time dimension has 214 values instead of 216 +sfcWind_20100601.nc : ok +sfcWind_20100701.nc : ok +sfcWind_20100801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20100801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20100801.nc : time dimension has 212 values instead of 216 +sfcWind_20100901.nc : time dimension has 110 values instead of 216 +sfcWind_20101001.nc : ok +sfcWind_20101101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20101101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20101101.nc : time dimension has 214 values instead of 216 +sfcWind_20101201.nc : time dimension has 160 values instead of 216 +sfcWind_20110101.nc : ok +sfcWind_20110201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20110201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20110201.nc : there is no dimension called ensemble inside this file +sfcWind_20110201.nc : time dimension has 10812 values instead of 216 +sfcWind_20110301.nc : ok +sfcWind_20110401.nc : ok +sfcWind_20110501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20110501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20110501.nc : time dimension has 214 values instead of 216 +sfcWind_20110601.nc : ok +sfcWind_20110701.nc : ok +sfcWind_20110801.nc : latitude dimension has 256 values instead of 181 
+sfcWind_20110801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20110801.nc : time dimension has 213 values instead of 216 +sfcWind_20110901.nc : ok +sfcWind_20111001.nc : ok +sfcWind_20111101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20111101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20111101.nc : time dimension has 214 values instead of 216 +sfcWind_20111201.nc : ok +sfcWind_20120101.nc : ok +sfcWind_20120201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20120201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20120201.nc : time dimension has 852 values instead of 216 +sfcWind_20120301.nc : ok +sfcWind_20120401.nc : time dimension has 180 values instead of 216 +sfcWind_20120501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20120501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20120501.nc : time dimension has 214 values instead of 216 +sfcWind_20120601.nc : time dimension has 97 values instead of 216 +sfcWind_20120701.nc : ok +sfcWind_20120801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20120801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20120801.nc : time dimension has 212 values instead of 216 +sfcWind_20120901.nc : ok +sfcWind_20121001.nc : >>>>> Missing file <<<<< +sfcWind_20121101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20121101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20121101.nc : time dimension has 214 values instead of 216 +sfcWind_20121201.nc : >>>>> Missing file <<<<< +sfcWind_20130101.nc : >>>>> Missing file <<<<< +sfcWind_20130201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20130201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20130201.nc : there is no dimension called ensemble inside this file +sfcWind_20130201.nc : time dimension has 11016 values instead of 216 +sfcWind_20130301.nc : >>>>> Missing file <<<<< +sfcWind_20130401.nc : >>>>> Missing file <<<<< +sfcWind_20130501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20130501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20130501.nc : there is no dimension called ensemble inside this file +sfcWind_20130501.nc : time dimension has 11016 values instead of 216 +sfcWind_20130601.nc : >>>>> Missing file <<<<< +sfcWind_20130701.nc : >>>>> Missing file <<<<< +sfcWind_20130801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20130801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20130801.nc : time dimension has 212 values instead of 216 +sfcWind_20130901.nc : ok +sfcWind_20131001.nc : ok +sfcWind_20131101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20131101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20131101.nc : time dimension has 212 values instead of 216 +sfcWind_20131201.nc : ok +sfcWind_20140101.nc : >>>>> Missing file <<<<< +sfcWind_20140201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20140201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20140201.nc : time dimension has 861 values instead of 216 +sfcWind_20140301.nc : >>>>> Missing file <<<<< +sfcWind_20140401.nc : >>>>> Missing file <<<<< +sfcWind_20140501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20140501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20140501.nc : time dimension has 214 values instead of 216 +sfcWind_20140601.nc : >>>>> 
Missing file <<<<< +sfcWind_20140701.nc : >>>>> Missing file <<<<< +sfcWind_20140801.nc : ok +sfcWind_20140901.nc : ok +sfcWind_20141001.nc : ok +sfcWind_20141101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20141101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20141101.nc : time dimension has 212 values instead of 216 +sfcWind_20141201.nc : ok +sfcWind_20150101.nc : >>>>> Missing file <<<<< +sfcWind_20150201.nc : >>>>> Missing file <<<<< +sfcWind_20150301.nc : >>>>> Missing file <<<<< +sfcWind_20150401.nc : >>>>> Missing file <<<<< +sfcWind_20150501.nc : >>>>> Missing file <<<<< +sfcWind_20150601.nc : ok +sfcWind_20150701.nc : ok +sfcWind_20150801.nc : time dimension has 197 values instead of 216 +sfcWind_20150901.nc : >>>>> Missing file <<<<< +sfcWind_20151001.nc : >>>>> Missing file <<<<< +sfcWind_20151101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20151101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20151101.nc : time dimension has 236 values instead of 216 +sfcWind_20151201.nc : >>>>> Missing file <<<<< diff --git a/bash/check_ECMWFS4_tas_daily.txt b/bash/check_ECMWFS4_tas_daily.txt new file mode 100644 index 0000000000000000000000000000000000000000..24d458a576f63e7080a85c7a9f21fe671ad11093 --- /dev/null +++ b/bash/check_ECMWFS4_tas_daily.txt @@ -0,0 +1,437 @@ +Checked variable: tas +Path: /esnas/exp/ecmwf/system4_m1/daily_mean/tas_f6h/ +Extension: .nc +Checked period: 1981-2015 +>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<< +Latitude denomination: latitude +Number of latitude values: 181 +Longitude denomination: longitude +Number of longitude values: 360 +Ensemble denomination: ensemble +Number of ensemble values: 15 +Alternative number of ensemble values: 51 +Lead-time denonomination: time +Number of lead-times: 216 + >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< +tas_19810101.nc : time dimension has 153 values instead of 216 +tas_19810201.nc : ok +tas_19810301.nc : ok +tas_19810401.nc : ok +tas_19810501.nc : ok +tas_19810601.nc : ok +tas_19810701.nc : time dimension has 422 values instead of 216 +tas_19810801.nc : ok +tas_19810901.nc : ok +tas_19811001.nc : ok +tas_19811101.nc : ok +tas_19811201.nc : ok +tas_19820101.nc : ok +tas_19820201.nc : ok +tas_19820301.nc : ok +tas_19820401.nc : ok +tas_19820501.nc : ok +tas_19820601.nc : ok +tas_19820701.nc : time dimension has 537 values instead of 216 +tas_19820801.nc : ok +tas_19820901.nc : ok +tas_19821001.nc : ok +tas_19821101.nc : ok +tas_19821201.nc : ok +tas_19830101.nc : ok +tas_19830201.nc : ok +tas_19830301.nc : ok +tas_19830401.nc : time dimension has 10 values instead of 216 +tas_19830501.nc : ok +tas_19830601.nc : ok +tas_19830701.nc : ok +tas_19830801.nc : ok +tas_19830901.nc : time dimension has 316 values instead of 216 +tas_19831001.nc : ok +tas_19831101.nc : ok +tas_19831201.nc : ok +tas_19840101.nc : ok +tas_19840201.nc : ok +tas_19840301.nc : ok +tas_19840401.nc : ok +tas_19840501.nc : ok +tas_19840601.nc : latitude dimension has 256 values instead of 181 +tas_19840601.nc : longitude dimension has 512 values instead of 360 +tas_19840601.nc : time dimension has 861 values instead of 216 +tas_19840701.nc : ok +tas_19840801.nc : ok +tas_19840901.nc : time dimension has 824 values instead of 216 +tas_19841001.nc : ok +tas_19841101.nc : ok +tas_19841201.nc : ok +tas_19850101.nc : ok +tas_19850201.nc : ok +tas_19850301.nc : time dimension has 309 values instead of 216 +tas_19850401.nc : ok +tas_19850501.nc : ok 
+tas_19850601.nc : time dimension has 321 values instead of 216 +tas_19850701.nc : ok +tas_19850801.nc : ok +tas_19850901.nc : time dimension has 658 values instead of 216 +tas_19851001.nc : ok +tas_19851101.nc : ok +tas_19851201.nc : ok +tas_19860101.nc : ok +tas_19860201.nc : ok +tas_19860301.nc : time dimension has 815 values instead of 216 +tas_19860401.nc : ok +tas_19860501.nc : ok +tas_19860601.nc : ok +tas_19860701.nc : ok +tas_19860801.nc : ok +tas_19860901.nc : time dimension has 424 values instead of 216 +tas_19861001.nc : ok +tas_19861101.nc : ok +tas_19861201.nc : ok +tas_19870101.nc : ok +tas_19870201.nc : ok +tas_19870301.nc : time dimension has 817 values instead of 216 +tas_19870401.nc : ok +tas_19870501.nc : ok +tas_19870601.nc : ok +tas_19870701.nc : ok +tas_19870801.nc : ok +tas_19870901.nc : time dimension has 347 values instead of 216 +tas_19871001.nc : ok +tas_19871101.nc : ok +tas_19871201.nc : ok +tas_19880101.nc : ok +tas_19880201.nc : ok +tas_19880301.nc : time dimension has 362 values instead of 216 +tas_19880401.nc : ok +tas_19880501.nc : ok +tas_19880601.nc : ok +tas_19880701.nc : time dimension has 841 values instead of 216 +tas_19880801.nc : ok +tas_19880901.nc : time dimension has 482 values instead of 216 +tas_19881001.nc : ok +tas_19881101.nc : ok +tas_19881201.nc : ok +tas_19890101.nc : ok +tas_19890201.nc : ok +tas_19890301.nc : time dimension has 361 values instead of 216 +tas_19890401.nc : ok +tas_19890501.nc : ok +tas_19890601.nc : ok +tas_19890701.nc : time dimension has 512 values instead of 216 +tas_19890801.nc : ok +tas_19890901.nc : ok +tas_19891001.nc : ok +tas_19891101.nc : ok +tas_19891201.nc : ok +tas_19900101.nc : time dimension has 38 values instead of 216 +tas_19900201.nc : ok +tas_19900301.nc : time dimension has 260 values instead of 216 +tas_19900401.nc : ok +tas_19900501.nc : ok +tas_19900601.nc : ok +tas_19900701.nc : ok +tas_19900801.nc : ok +tas_19900901.nc : ok +tas_19901001.nc : ok +tas_19901101.nc : ok +tas_19901201.nc : ok +tas_19910101.nc : ok +tas_19910201.nc : ok +tas_19910301.nc : ok +tas_19910401.nc : ok +tas_19910501.nc : ok +tas_19910601.nc : ok +tas_19910701.nc : ok +tas_19910801.nc : ok +tas_19910901.nc : ok +tas_19911001.nc : ok +tas_19911101.nc : ok +tas_19911201.nc : ok +tas_19920101.nc : ok +tas_19920201.nc : ok +tas_19920301.nc : ok +tas_19920401.nc : ok +tas_19920501.nc : ok +tas_19920601.nc : ok +tas_19920701.nc : ok +tas_19920801.nc : ok +tas_19920901.nc : ok +tas_19921001.nc : ok +tas_19921101.nc : time dimension has 192 values instead of 216 +tas_19921201.nc : ok +tas_19930101.nc : ok +tas_19930201.nc : ok +tas_19930301.nc : ok +tas_19930401.nc : ok +tas_19930501.nc : ok +tas_19930601.nc : ok +tas_19930701.nc : ok +tas_19930801.nc : ok +tas_19930901.nc : ok +tas_19931001.nc : ok +tas_19931101.nc : ok +tas_19931201.nc : ok +tas_19940101.nc : ok +tas_19940201.nc : ok +tas_19940301.nc : ok +tas_19940401.nc : ok +tas_19940501.nc : ok +tas_19940601.nc : ok +tas_19940701.nc : ok +tas_19940801.nc : ok +tas_19940901.nc : ok +tas_19941001.nc : ok +tas_19941101.nc : ok +tas_19941201.nc : ok +tas_19950101.nc : ok +tas_19950201.nc : ok +tas_19950301.nc : ok +tas_19950401.nc : ok +tas_19950501.nc : ok +tas_19950601.nc : ok +tas_19950701.nc : ok +tas_19950801.nc : ok +tas_19950901.nc : ok +tas_19951001.nc : ok +tas_19951101.nc : ok +tas_19951201.nc : ok +tas_19960101.nc : ok +tas_19960201.nc : ok +tas_19960301.nc : ok +tas_19960401.nc : ok +tas_19960501.nc : ok +tas_19960601.nc : ok +tas_19960701.nc : ok +tas_19960801.nc : 
ok +tas_19960901.nc : ok +tas_19961001.nc : ok +tas_19961101.nc : ok +tas_19961201.nc : ok +tas_19970101.nc : ok +tas_19970201.nc : ok +tas_19970301.nc : ok +tas_19970401.nc : ok +tas_19970501.nc : ok +tas_19970601.nc : ok +tas_19970701.nc : ok +tas_19970801.nc : ok +tas_19970901.nc : ok +tas_19971001.nc : ok +tas_19971101.nc : ok +tas_19971201.nc : ok +tas_19980101.nc : ok +tas_19980201.nc : ok +tas_19980301.nc : time dimension has 9 values instead of 216 +tas_19980401.nc : ok +tas_19980501.nc : ok +tas_19980601.nc : ok +tas_19980701.nc : ok +tas_19980801.nc : ok +tas_19980901.nc : ok +tas_19981001.nc : ok +tas_19981101.nc : ok +tas_19981201.nc : ok +tas_19990101.nc : time dimension has 22 values instead of 216 +tas_19990201.nc : ok +tas_19990301.nc : time dimension has 16 values instead of 216 +tas_19990401.nc : ok +tas_19990501.nc : ok +tas_19990601.nc : ok +tas_19990701.nc : ok +tas_19990801.nc : ok +tas_19990901.nc : ok +tas_19991001.nc : ok +tas_19991101.nc : ok +tas_19991201.nc : ok +tas_20000101.nc : ok +tas_20000201.nc : ok +tas_20000301.nc : ok +tas_20000401.nc : ok +tas_20000501.nc : ok +tas_20000601.nc : ok +tas_20000701.nc : ok +tas_20000801.nc : ok +tas_20000901.nc : ok +tas_20001001.nc : ok +tas_20001101.nc : ok +tas_20001201.nc : ok +tas_20010101.nc : ok +tas_20010201.nc : ok +tas_20010301.nc : ok +tas_20010401.nc : ok +tas_20010501.nc : ok +tas_20010601.nc : ok +tas_20010701.nc : ok +tas_20010801.nc : ok +tas_20010901.nc : ok +tas_20011001.nc : ok +tas_20011101.nc : ok +tas_20011201.nc : ok +tas_20020101.nc : ok +tas_20020201.nc : time dimension has 207 values instead of 216 +tas_20020301.nc : ok +tas_20020401.nc : ok +tas_20020501.nc : ok +tas_20020601.nc : ok +tas_20020701.nc : ok +tas_20020801.nc : ok +tas_20020901.nc : ok +tas_20021001.nc : ok +tas_20021101.nc : ok +tas_20021201.nc : ok +tas_20030101.nc : ok +tas_20030201.nc : ok +tas_20030301.nc : ok +tas_20030401.nc : ok +tas_20030501.nc : ok +tas_20030601.nc : ok +tas_20030701.nc : ok +tas_20030801.nc : ok +tas_20030901.nc : ok +tas_20031001.nc : ok +tas_20031101.nc : ok +tas_20031201.nc : ok +tas_20040101.nc : ok +tas_20040201.nc : ok +tas_20040301.nc : ok +tas_20040401.nc : ok +tas_20040501.nc : ok +tas_20040601.nc : ok +tas_20040701.nc : ok +tas_20040801.nc : ok +tas_20040901.nc : ok +tas_20041001.nc : ok +tas_20041101.nc : ok +tas_20041201.nc : ok +tas_20050101.nc : ok +tas_20050201.nc : ok +tas_20050301.nc : ok +tas_20050401.nc : ok +tas_20050501.nc : ok +tas_20050601.nc : ok +tas_20050701.nc : ok +tas_20050801.nc : ok +tas_20050901.nc : ok +tas_20051001.nc : ok +tas_20051101.nc : ok +tas_20051201.nc : ok +tas_20060101.nc : ok +tas_20060201.nc : ok +tas_20060301.nc : ok +tas_20060401.nc : ok +tas_20060501.nc : time dimension has 186 values instead of 216 +tas_20060601.nc : ok +tas_20060701.nc : time dimension has 9 values instead of 216 +tas_20060801.nc : ok +tas_20060901.nc : ok +tas_20061001.nc : ok +tas_20061101.nc : ok +tas_20061201.nc : ok +tas_20070101.nc : ok +tas_20070201.nc : ok +tas_20070301.nc : ok +tas_20070401.nc : ok +tas_20070501.nc : ok +tas_20070601.nc : ok +tas_20070701.nc : ok +tas_20070801.nc : ok +tas_20070901.nc : ok +tas_20071001.nc : ok +tas_20071101.nc : ok +tas_20071201.nc : time dimension has 110 values instead of 216 +tas_20080101.nc : ok +tas_20080201.nc : ok +tas_20080301.nc : ok +tas_20080401.nc : ok +tas_20080501.nc : ok +tas_20080601.nc : ok +tas_20080701.nc : ok +tas_20080801.nc : ok +tas_20080901.nc : ok +tas_20081001.nc : ok +tas_20081101.nc : ok +tas_20081201.nc : ok 
+tas_20090101.nc : ok +tas_20090201.nc : ok +tas_20090301.nc : ok +tas_20090401.nc : ok +tas_20090501.nc : ok +tas_20090601.nc : ok +tas_20090701.nc : ok +tas_20090801.nc : ok +tas_20090901.nc : ok +tas_20091001.nc : ok +tas_20091101.nc : ok +tas_20091201.nc : ok +tas_20100101.nc : ok +tas_20100201.nc : ok +tas_20100301.nc : ok +tas_20100401.nc : ok +tas_20100501.nc : ok +tas_20100601.nc : ok +tas_20100701.nc : ok +tas_20100801.nc : ok +tas_20100901.nc : ok +tas_20101001.nc : ok +tas_20101101.nc : ok +tas_20101201.nc : ok +tas_20110101.nc : ok +tas_20110201.nc : ok +tas_20110301.nc : ok +tas_20110401.nc : ok +tas_20110501.nc : ok +tas_20110601.nc : ok +tas_20110701.nc : time dimension has 112 values instead of 216 +tas_20110801.nc : ok +tas_20110901.nc : time dimension has 118 values instead of 216 +tas_20111001.nc : ok +tas_20111101.nc : ok +tas_20111201.nc : ok +tas_20120101.nc : ok +tas_20120201.nc : ok +tas_20120301.nc : ok +tas_20120401.nc : ok +tas_20120501.nc : ok +tas_20120601.nc : ok +tas_20120701.nc : ok +tas_20120801.nc : ok +tas_20120901.nc : ok +tas_20121001.nc : ok +tas_20121101.nc : ok +tas_20121201.nc : ok +tas_20130101.nc : ok +tas_20130201.nc : ok +tas_20130301.nc : ok +tas_20130401.nc : ok +tas_20130501.nc : ok +tas_20130601.nc : ok +tas_20130701.nc : time dimension has 89 values instead of 216 +tas_20130801.nc : ok +tas_20130901.nc : ok +tas_20131001.nc : ok +tas_20131101.nc : ok +tas_20131201.nc : ok +tas_20140101.nc : ok +tas_20140201.nc : ok +tas_20140301.nc : ok +tas_20140401.nc : ok +tas_20140501.nc : ok +tas_20140601.nc : ok +tas_20140701.nc : ok +tas_20140801.nc : ok +tas_20140901.nc : ok +tas_20141001.nc : ok +tas_20141101.nc : ok +tas_20141201.nc : ok +tas_20150101.nc : ok +tas_20150201.nc : ok +tas_20150301.nc : ok +tas_20150401.nc : ok +tas_20150501.nc : ok +tas_20150601.nc : ok +tas_20150701.nc : time dimension has 811 values instead of 216 +tas_20150801.nc : ok +tas_20150901.nc : >>>>> Missing file <<<<< +tas_20151001.nc : >>>>> Missing file <<<<< +tas_20151101.nc : >>>>> Missing file <<<<< +tas_20151201.nc : >>>>> Missing file <<<<< diff --git a/bash/checking_if_data_inside_is_the_same.txt b/bash/checking_if_data_inside_is_the_same.txt new file mode 100644 index 0000000000000000000000000000000000000000..3278e08c41a35c60de357c446d47b524df193800 --- /dev/null +++ b/bash/checking_if_data_inside_is_the_same.txt @@ -0,0 +1,32 @@ + +# check reanalysis: +month=01 +day=02 + +for year in {2002..2013} +do +ncks -O -h -d latitude,64 -d longitude,632 -d time,56,83 /esnas/exp/ECMWF/monthly/ensforhc/6hourly/sfcWind/2014${month}${day}00/sfcWind_${year}${month}${day}00.nc ~/my_point_sfcWind.nc; +ncwa -O -a ensemble ~/my_point_sfcWind.nc ~/my_point_ensemble_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_all28_sfcWind_${year}010200.nc; +ncks -O -h -d time,0,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM0_sfcWind_${year}010200.nc; +ncks -O -h -d time,1,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM6_sfcWind_${year}010200.nc; +ncks -O -h -d time,2,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM12_sfcWind_${year}010200.nc; +ncks -O -h -d time,3,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time 
~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM18_sfcWind_${year}010200.nc;
+done
+
+
+
+
+# check forecasts:
+month=01
+for day in 02 09 16 23 30
+do
+ncks -O -h -d latitude,64 -d longitude,632 -d time,56,83 /esnas/exp/ECMWF/monthly/ensfor/6hourly/sfcWind/sfcWind_2014${month}${day}00.nc ~/my_point_sfcWind.nc;
+ncwa -O -a ensemble ~/my_point_sfcWind.nc ~/my_point_ensemble_sfcWind.nc;
+ncwa -O -a time ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_all28_sfcWind_2014${month}${day}00.nc;
+done
+
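+# Note (an interpretation, not stated above): "-d time,56,83" keeps 28 six-hourly
+# steps (7 days of the forecast), hence the "all28" suffix of the time-averaged
+# files; the stride-4 subsets "-d time,0,,4" ... "-d time,3,,4" split those steps
+# by time of day, giving the UTM0/UTM6/UTM12/UTM18 files.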
diff --git a/bash/checking_ncdata.sh b/bash/checking_ncdata.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b539211e756e0dbec2989ea6c20d39185feeb7e8
--- /dev/null
+++ b/bash/checking_ncdata.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+
+# Creation: 6/2016
+# Authors: Nicola Cortesi and Raul Marcos
+# Aim: to do a quality control of the dimension variables inside all NetCDF data files in a directory. Note that this script is not able to detect whether a file is corrupt.
+# I/O: you only have to specify where the files you want to check are, and the correct names and sizes of the dimensions inside them.
+# The output is a text file inside the directory where this script is run, whose name starts with "check_", containing the results of the quality control.
+
+var=sfcWind #tas #sfcWind #psl # name of the variable to check
+dat=ECMWFS4 # dataset name
+freq=daily # time step (just for the output filename)
+
+path=/esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/
+#path=/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind/ # its path
+#path=/esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/
+#/esnas/exp/Meteofrance/seasonal/0001/s004/m001/6hourly/sfcWind Meteofrance S4
+#/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind ECMWF S4
+
+suffix=01 # this suffix is usually present in all S4 netCDF files, right before the file extension
+ext=.nc # file extension
+
+yearStart=1981 # time period to check inside the files
+yearEnd=2015
+
+nameLatitude=latitude # name of the latitude dimension to check in the file
+nLatitude=181 #128 #181 #256 # number of latitude values to check
+
+nameLongitude=longitude # name of the longitude dimension to check in the file
+nLongitude=360 #256 #360 #512 # number of longitude values to check
+
+nameMembers=ensemble # name of the dimension with the model members to check in the file
+nMembers=15 #51 #15 # number of members to check in the file
+nMemberss=51 # an alternative number of ensemble members that is also accepted (set it equal to nMembers if not needed)
+
+nameLeadtimes=time # name of the dimension with the forecast time to check in the file
+nLeadtimes=216 #861 # number of forecast times to check in the file (215 complete days * 4 (6-hourly) + 1 for the last midnight)
+
+printf "\t\n" > ktemp
+printf "Checked variable: \t $var\n" >> ktemp
+printf "Path: \t $path\n" >> ktemp
+printf "Extension: \t $ext\n" >> ktemp
+printf "Checked period: \t $yearStart-$yearEnd\n" >> ktemp
+printf "\t\n" >> ktemp
+printf ">>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<<\n" >> ktemp
+printf "\t\n" >> ktemp
+printf "Latitude denomination: \t $nameLatitude\n" >> ktemp
+printf "Number of latitude values: \t $nLatitude\n" >> ktemp
+printf "Longitude denomination: \t $nameLongitude\n" >> ktemp
+printf "Number of longitude values:\t $nLongitude\n" >> ktemp
+printf "Ensemble denomination: \t $nameMembers\n" >> ktemp
+printf "Number of ensemble values: \t $nMembers\n" >> ktemp
+if [ $nMembers != $nMemberss ]; then
+printf "Alternative number of ensemble values: \t $nMemberss\n" >> ktemp
+fi
+printf "Lead-time denomination: \t $nameLeadtimes\n" >> ktemp
+printf "Number of lead-times: \t $nLeadtimes\n" >> ktemp
+
+printf "\n\n >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< \n\n" | tee -a ktemp
+
+outputfile=./check_${dat}_${var}_${freq}.txt
+
+# function that detects the size of a dimension variable in a netCDF file:
+#
+# usage: get_size myNetCDF.nc myDimensionName
+#
+function get_size {
+  ncks -m ${1} | grep -E -i ": ${2}, size =" | cut -d ' ' -f 7 | uniq
+}
+
+# check each file of the chosen period:
+for ((i=$yearStart;i<=$yearEnd;i++))
+do
+for j in 01 02 03 04 05 06 07 08 09 10 11 12
+do
+  ok1=0; ok2=0; ok3=0; ok4=0; ok5=0
+
+  # echo $path${var}_$i$j$suffix$ext > ktemp
+  if [ ! -f $path${var}_$i$j$suffix$ext ]
+  then
+    echo ${var}_$i$j$suffix$ext ": >>>>> Missing file <<<<<" | tee -a ktemp
+  else
+    nMemb=`get_size $path${var}_$i$j$suffix$ext $nameMembers`
+    nLead=`get_size $path${var}_$i$j$suffix$ext $nameLeadtimes`
+    nLat=`get_size $path${var}_$i$j$suffix$ext $nameLatitude`
+    nLon=`get_size $path${var}_$i$j$suffix$ext $nameLongitude`
+
+    if [ -z "$nLat" ]; then # check if nLat is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLatitude " inside this file " | tee -a ktemp
+    elif [ ${nLat//[[:blank:]]/} -ne $nLatitude ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameLatitude" dimension has " $nLat " values instead of " $nLatitude | tee -a ktemp
+    else
+      ok1=1
+    fi
+
+    if [ -z "$nLon" ]; then # check if nLon is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLongitude " inside this file " | tee -a ktemp
+    elif [ ${nLon//[[:blank:]]/} -ne $nLongitude ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameLongitude "dimension has " $nLon " values instead of " $nLongitude | tee -a ktemp
+    else
+      ok2=1
+    fi
+
+    if [ -z "$nMemb" ]; then # check if nMemb is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameMembers " inside this file " | tee -a ktemp
+    elif [ ${nMemb//[[:blank:]]/} -ne $nMembers -a ${nMemb//[[:blank:]]/} -ne "$nMemberss" ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameMembers "dimension has " $nMemb " values instead of " $nMembers | tee -a ktemp
+    else
+      ok3=1
+    fi
+
+    if [ -z "$nLead" ]; then
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLeadtimes " inside this file " | tee -a ktemp
+    elif [ ${nLead//[[:blank:]]/} -ne $nLeadtimes ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameLeadtimes " dimension has " $nLead " values instead of " $nLeadtimes | tee -a ktemp
+    else
+      ok4=1
+    fi
+
+    if [ $ok1 -eq 1 ] && [ $ok2 -eq 1 ] && [ $ok3 -eq 1 ] && [ $ok4 -eq 1 ]; then
+      echo ${var}_$i$j$suffix$ext ": ok" | tee -a ktemp
+    fi
+  fi
+done
+done
+
+column -t -s $'\t' ktemp > $outputfile
+rm ktemp
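+
+# Example run (a sketch, assuming NCO's ncks is available in $PATH):
+#   bash checking_ncdata.sh
+# then inspect ./check_ECMWFS4_sfcWind_daily.txt, which should look like the
+# check_*.txt reports added in this same commit.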
+
+### other common changes to netCDF that can be useful:
+
+# rename dimensions/variables in all netCDF in the directory where the command is executed:
+#for file in *; do ncrename -d .number,ensemble -d .lev,ensemble -v .number,realization -v .msl,psl $file; done
+#for file in *; do ncrename -d .number,ensemble -d .lev,ensemble -v .number,realization -v .t2m,tas $file; done
+#for file in *; do ncrename -d .reftime,time -d .sfc,ensemble -v .reftime,time -v .sfc,ensemble $file; done
+#for file in *; do ncrename -d .lat,latitude -d .lon,longitude -v .lat,latitude -v .lon,longitude $file; done
+
+# # convert a 6-hourly file to a daily one:
+# for file in *; do cdo daymean $file /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/$file; done
+
+# convert only February 6-hourly files to daily and update their variable names:
+#for year in {1981..2015}; do cdo daymean sfcWind_${year}0201.nc /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/sfcWind_${year}0201.nc; ncrename -d .lev,ensemble -v .lev,ensemble /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/sfcWind_${year}0201.nc; done
+
+# cdo showdate
+
+# PA command to convert to both daily and monthly netCDF:
+# convert a .nc file of ECMWF S4 after downloading:
+#file=_grib2netcdf-atls01-95e2cf....nc ; ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file ; date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) ; mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc ; cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+
+
+#file=_grib2netcdf-atls01-95e2cf....nc
+#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file
+#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g)
+#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc
+#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc
+#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
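+
+# Hand check of a single file (a sketch with a hypothetical date, assuming the
+# "name: dim, size = N" metadata layout that get_size greps for):
+#   ncks -m /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_19810101.nc | grep -E -i ": time, size ="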
/esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc + + + +#file=_grib2netcdf-atls01-95e2cf....nc +#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file +#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) +#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc +#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc +#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc diff --git a/bash/copy_to_dropbox.sh b/bash/copy_to_dropbox.sh new file mode 100644 index 0000000000000000000000000000000000000000..67ac486489aa731cd44b3c693f545abf4ccc3ff4 --- /dev/null +++ b/bash/copy_to_dropbox.sh @@ -0,0 +1,55 @@ + +for lat in {30..72} +do + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_350.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_351.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_352.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_353.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_354.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_355.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_356.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_357.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_358.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_359.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_0.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_1.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_2.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_3.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_4.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_5.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_6.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_7.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_8.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_9.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_10.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_11.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_12.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_13.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_14.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_15.* /home/Earth/ncortesi/Dropbox/data + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_16.* /home/Earth/ncortesi/Dropbox/data + cp 
diff --git a/bash/fix_ncdata.sh b/bash/fix_ncdata.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7434ea6aa6cf1b74b1882e861e72a940ee2ee966
--- /dev/null
+++ b/bash/fix_ncdata.sh
@@ -0,0 +1,144 @@
+#!/bin/bash
+
+# Creation: 6/2016
+# Authors: Nicola Cortesi and Raul Marcos
+# Aim: to do a quality control of the dimension variables inside all NetCDF data files in a directory. Note that this script is not able to detect whether a file is corrupt.
+# I/O: you only have to specify where the files you want to check are located, together with the correct names and sizes of the dimensions inside them.
+# The output is a text file, created inside the directory where this script is run and whose name starts with "check_", containing the results of the quality control.
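+#
+# Example (illustrative; the exact spacing of the output lines may differ): with the
+# default settings below, running "bash fix_ncdata.sh" scans files like
+# /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_19810101.nc over 1981-2015 and writes
+# check_ECMWFS4_psl_6hourly.txt, with one line per file such as:
+#   psl_19810301.nc : ok
+#   psl_19810201.nc : ensemble dimension has 51 values instead of 15
+#   psl_19830201.nc : >>>>> Missing file <<<<<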
+
+var=psl #sfcWind # name of the variable to check
+dat=ECMWFS4 # dataset name
+freq=6hourly # time step
+#path=/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind/ # its path
+path=/esnas/exp/ecmwf/system4_m1/6hourly/psl/
+#/esnas/exp/Meteofrance/seasonal/0001/s004/m001/6hourly/sfcWind Meteofrance S4
+#/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind ECMWF S4
+
+suffix=01 # this suffix is usually present in all S4 netCDF files, right before the file extension
+ext=.nc # file extension
+
+yearStart=1981 # time period to check inside the files
+yearEnd=2015
+
+nameLatitude=latitude # name of the latitude dimension to check in the file
+nLatitude=181 #128 #181 #256 # number of latitude values to check
+
+nameLongitude=longitude # name of the longitude dimension to check in the file
+nLongitude=360 #256 #360 #512 # number of longitude values to check
+
+nameMembers=ensemble # name of the dimension with the model members to check in the file
+nMembers=15 #51 #15 # number of members to check in the file
+nMemberss=51 # alternative number of ensemble members that is also accepted (set it equal to nMembers if there is none)
+
+nameLeadtimes=time # name of the dimension with the forecast time to check in the file
+nLeadtimes=861 #216 # number of forecast times to check in the file (215 complete days * 4 (6-hourly) + 1 for the last midnight)
+
+printf "\t\n" > ktemp
+printf "Checked variable: \t $var\n" >> ktemp
+printf "Path: \t $path\n" >> ktemp
+printf "Extension: \t $ext\n" >> ktemp
+printf "Checked period: \t $yearStart-$yearEnd\n" >> ktemp
+printf "\t\n" >> ktemp
+printf ">>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<<\n" >> ktemp
+printf "\t\n" >> ktemp
+printf "Latitude denomination: \t $nameLatitude\n" >> ktemp
+printf "Number of latitude values: \t $nLatitude\n" >> ktemp
+printf "Longitude denomination: \t $nameLongitude\n" >> ktemp
+printf "Number of longitude values:\t $nLongitude\n" >> ktemp
+printf "Ensemble denomination: \t $nameMembers\n" >> ktemp
+printf "Number of ensemble values: \t $nMembers\n" >> ktemp
+if [ $nMembers != $nMemberss ]; then
+printf "Alternative number of ensemble values: \t $nMemberss\n" >> ktemp
+fi
+printf "Lead-time denomination: \t $nameLeadtimes\n" >> ktemp
+printf "Number of lead-times: \t $nLeadtimes\n" >> ktemp
+
+printf "\n\n >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< \n\n" | tee -a ktemp
+
+outputfile=./check_${dat}_${var}_${freq}.txt
+
+# function that detects the size of a dimension variable in a netCDF file,
+# by grepping the "<name>, size = <N>" line from the metadata printed by ncks -m:
+#
+# usage: get_size myNetCDF.nc myDimensionName
+#
+function get_size {
+  ncks -m ${1} | grep -E -i ": ${2}, size =" | cut -d ' ' -f 7 | uniq
+}
+
+# check file existence:
+for ((i=$yearStart;i<=$yearEnd;i++))
+do
+for j in 01 02 03 04 05 06 07 08 09 10 11 12
+do
+ ok1=0; ok2=0; ok3=0; ok4=0
+
+ # echo $path${var}_$i$j$suffix$ext > ktemp
+ if [ !
-f $path${var}_$i$j$suffix$ext ]
+ then
+   echo ${var}_$i$j$suffix$ext ": >>>>> Missing file <<<<<" | tee -a ktemp
+ else
+   nMemb=`get_size $path${var}_$i$j$suffix$ext $nameMembers`
+   nLead=`get_size $path${var}_$i$j$suffix$ext $nameLeadtimes`
+   nLat=`get_size $path${var}_$i$j$suffix$ext $nameLatitude`
+   nLon=`get_size $path${var}_$i$j$suffix$ext $nameLongitude`
+
+   if [ -z "$nLat" ]; then # check if nLat is empty
+     echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLatitude " inside this file " | tee -a ktemp
+   elif [ ${nLat//[[:blank:]]/} -ne $nLatitude ]; then
+     echo ${var}_$i$j$suffix$ext ": "$nameLatitude" dimension has " $nLat " values instead of " $nLatitude | tee -a ktemp
+   else
+     ok1=1
+   fi
+
+   if [ -z "$nLon" ]; then # check if nLon is empty
+     echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLongitude " inside this file " | tee -a ktemp
+   elif [ ${nLon//[[:blank:]]/} -ne $nLongitude ]; then
+     echo ${var}_$i$j$suffix$ext ": "$nameLongitude" dimension has " $nLon " values instead of " $nLongitude | tee -a ktemp
+   else
+     ok2=1
+   fi
+
+   if [ -z "$nMemb" ]; then # check if nMemb is empty
+     echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameMembers " inside this file " | tee -a ktemp
+   elif [ ${nMemb//[[:blank:]]/} -ne $nMembers ] && [ ${nMemb//[[:blank:]]/} -ne $nMemberss ]; then
+     echo ${var}_$i$j$suffix$ext ": "$nameMembers" dimension has " $nMemb " values instead of " $nMembers | tee -a ktemp
+   else
+     ok3=1
+   fi
+
+   if [ -z "$nLead" ]; then # check if nLead is empty
+     echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLeadtimes " inside this file " | tee -a ktemp
+   elif [ ${nLead//[[:blank:]]/} -ne $nLeadtimes ]; then
+     echo ${var}_$i$j$suffix$ext ": "$nameLeadtimes" dimension has " $nLead " values instead of " $nLeadtimes | tee -a ktemp
+   else
+     ok4=1
+   fi
+
+   if [ $ok1 -eq 1 ] && [ $ok2 -eq 1 ] && [ $ok3 -eq 1 ] && [ $ok4 -eq 1 ]; then
+     echo ${var}_$i$j$suffix$ext ": ok" | tee -a ktemp
+   fi
+ fi
+done
+done
+
+column -t -s $'\t' ktemp > $outputfile
+rm ktemp
+
+### other common changes to netCDF that can be useful:
+
+# convert a 6-hourly file to a daily one:
+# for file in *; do cdo daymean $file /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/$file; done
+
+# cdo showdate
+
+# rename dimensions/variables in all files (template: fill in the old,new pairs):
+# for file in *; do ncrename -d .oldname,newname -v .oldname,newname $file; done
+
+# convert a .nc file of ECMWF S4 after downloading:
+#file=_grib2netcdf-atls01-95e2cf....nc ; ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file ; date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) ; mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc ; cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+#file=_grib2netcdf-atls01-95e2cf....nc
+#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file
+#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g)
+#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc
+#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc
+#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
diff --git a/bash/logo_llorens_v3.sh b/bash/logo_llorens_v3.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8a3f7179d567d323aa115ae3ea601d88ecb7ef8b
--- /dev/null
+++ b/bash/logo_llorens_v3.sh
@@ -0,0 +1,306 @@
+#!/usr/bin/sh
+
+#-------------------
+# Usage info
+#-------------------
+show_help() {
+  cat << EOF
+Usage: ${0##*/} [-hl] [-t "title"] [-c "caption"] INFILE OUTFILE
+Add caption, title and logo to an image, for including in the catalog.
+OPTIONS:
+  -h            Display this help and exit
+  -t "title"    Add a title on top of the image
+  -c "caption"  Add a caption below the image
+  -l            Add the BSC logo to the bottom right of the image
+EOF
+}
+
+#-------------------
+# Initialize flags
+#-------------------
+
+titsize=25 # size of the title. It is automatically rescaled with the size of the figure: if the figure width doubles, the title size doubles too (600 px is the reference width, so e.g. a 1200 px wide figure gives a rescaled title size of 25*1200/600 = 50; see titsize_rescaled below).
+captionsize=12 # size of the caption. It is automatically rescaled too.
+resizelogo=25 # width of the logo compared to the width of the image (in %). 25% is a good balance.
+
+cut_title=true # you can also cut a horizontal strip at the top of the image, if you want to remove an old title before adding the new one with option -t
+cut_title_pixels=50 # height of the horizontal strip (in pixels) to remove when cut_title=true.
+
+OPTIND=1
+LOGO=false
+LOGO_file=/home/Earth/ncortesi/logo1.png
+
+#-------------------
+# Parse input options
+#-------------------
+while getopts ":t:c:lh" opt; do
+  case $opt in
+    h)
+      show_help
+      exit 0
+      ;;
+    t)
+      echo "-t was triggered, Title: $OPTARG" >&2
+      TITLE=$OPTARG
+      ;;
+    c)
+      echo "-c was triggered, Caption: $OPTARG" >&2
+      CAPTION=$OPTARG
+      ;;
+    l)
+      echo "-l was triggered, Include BSC logo" >&2
+      LOGO=true
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+    :)
+      echo "Option -$OPTARG requires an argument." >&2
+      exit 1
+      ;;
+  esac
+done
+shift "$((OPTIND-1))" # Shift off the options
+
+#-------------------
+# Check we have two args remaining
+#-------------------
+if [[ $# -ne 2 ]];
+then
+  show_help
+  exit 0
+fi
+
+#-------------------
+# Get in and out files
+#-------------------
+INFILE=$1
+OUTFILE=$2
+
+#-------------------
+# Check infile exists
+#-------------------
+if [[ !
-f $INFILE ]]
+then
+  echo "File not found: $INFILE" >&2
+  exit 1
+fi
+
+####################
+# Start doing some work
+####################
+
+#------------------------------
+# Convert ps to png with 300dpi
+#------------------------------
+if [[ $INFILE == *.ps ]]
+then
+  convert -units PixelsPerInch -density 300 -background white -flatten $INFILE crop.png
+else
+  cp $INFILE crop.png
+fi
+
+# get the width of the image in pixels:
+width_figure=$(identify -ping -format %w crop.png)
+
+# get the height of the image in pixels:
+height_figure=$(identify -ping -format %h crop.png)
+
+#-------------------
+# Add caption
+#-------------------
+if [[ -v CAPTION ]]
+then
+
+  #-------------------------------------
+  # In case the logo has to be drawn too
+  #-------------------------------------
+  if ( $LOGO )
+  then
+    width_logo_area=$(( $width_figure * $resizelogo / 100 ))
+    width_caption_area=$(( $width_figure - $width_logo_area ))
+
+    width_caption=$(( $width_caption_area * 90 / 100 ))
+    width_logo=$(( $width_logo_area * 90 / 100 ))
+    width_logo_file=$(identify -ping -format %w $LOGO_file)
+
+    # 1) create the caption:
+    captionsize_rescaled=$(( $captionsize * $width_figure / 600 ))
+    convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 crop1.png
+
+    # 2) insert the caption at the top center of the caption area:
+    convert crop1.png -gravity North -background white -extent ${width_caption_area}x crop2.png
+
+    # 3) resize the logo, to be proportional to the size of the figure:
+    resize_logo=$(( $width_logo * 100 / $width_logo_file ))
+    convert -background white $LOGO_file -scale ${resize_logo}% crop3.png
+
+    # 4) insert the logo over a larger canvas (the logo area), in its top-left part:
+    height_caption_area=$(identify -ping -format %h crop2.png)
+    height_logo_resized=$(identify -ping -format %h crop3.png)
+
+    if [[ $height_caption_area -lt $height_logo_resized ]]; then
+      height_logo_area=$height_logo_resized
+    else
+      height_logo_area=$(identify -ping -format %h crop1.png)
+    fi
+
+    convert crop3.png -gravity NorthWest -background white -extent ${width_logo_area}x${height_logo_area} crop4.png
+
+    # if the caption is shorter than the logo, insert the caption at the center of the caption area instead of at the top center:
+    if [[ $height_caption_area -lt $height_logo_resized ]]; then
+      convert crop1.png -gravity Center -background white -extent ${width_caption_area}x${height_logo_area} crop2.png
+    fi
+
+    # 5) Merge the caption area and the logo area together:
+    montage crop2.png crop4.png -tile 2x1 -geometry +0+0 crop5.png
+
+    # 6) Add a white horizontal strip over the caption+logo area:
+    convert -size x20 xc:white crop6.png
+    montage crop6.png crop5.png -tile 1x2 -geometry +0+0 crop7.png
+
+    # 7) Add a white horizontal strip below the caption+logo area:
+    if [[ $height_caption_area -ge $height_logo_resized ]]; then
+      convert -size x20 xc:white crop8.png
+      montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+    else
+      convert -size x5 xc:white crop8.png
+      montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+    fi
+
+    # 8) Add the figure:
+    montage crop.png crop9.png -tile 1x2 -geometry +0+0 crop10.png
+
+  else
+    #-------------------------
+    # In case there is no logo
+    #-------------------------
+
+    width_caption=$(( $width_figure * 90 / 100 ))
+
+    # 1) Create the caption:
+    captionsize_rescaled=$(( $captionsize * $width_figure / 600 ))
+    convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 crop1.png
+
+    # 2) Insert the caption at the top center of the caption area:
+    convert crop1.png -gravity North -background white -extent ${width_figure}x crop2.png
+
+    # 3) Add a white horizontal strip over the caption area:
+    convert -size x20 xc:white crop3.png
+    montage crop3.png crop2.png -tile 1x2 -geometry +0+0 crop7.png
+
+    # 4) Add a white horizontal strip below the caption area:
+    convert -size x20 xc:white crop8.png
+    montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+
+    # 5) Add the figure:
+    montage crop.png crop9.png -tile 1x2 -geometry +0+0 crop10.png
+
+    # xdg-open crop10.png # for debugging
+
+  fi # end if on LOGO
+else
+  #----------------------------
+  # In case there is no caption
+  #----------------------------
+
+  CAPTION=""
+
+  if ( $LOGO )
+  then
+    #-------------------------------------
+    # In case the logo has to be drawn too
+    #-------------------------------------
+
+    width_logo_area=$(( $width_figure * $resizelogo / 100 ))
+    width_caption_area=$(( $width_figure - $width_logo_area ))
+
+    width_caption=$(( $width_caption_area * 90 / 100 ))
+    width_logo=$(( $width_logo_area * 90 / 100 ))
+    width_logo_file=$(identify -ping -format %w $LOGO_file)
+
+    # 1) create the (empty) caption:
+    captionsize_rescaled=$(( $captionsize * $width_figure / 600 ))
+    convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 crop1.png
+
+    # 2) insert the caption at the top center of the caption area:
+    convert crop1.png -gravity North -background white -extent ${width_caption_area}x crop2.png
+
+    # 3) resize the logo, to be proportional to the size of the figure:
+    resize_logo=$(( $width_logo * 100 / $width_logo_file ))
+    convert -background white $LOGO_file -scale ${resize_logo}% crop3.png
+
+    # 4) insert the logo over a larger canvas (the logo area), in its top-left part:
+    height_caption_area=$(identify -ping -format %h crop2.png)
+    height_logo_resized=$(identify -ping -format %h crop3.png)
+
+    if [[ $height_caption_area -lt $height_logo_resized ]]; then
+      height_logo_area=$height_logo_resized
+    else
+      height_logo_area=$(identify -ping -format %h crop1.png)
+    fi
+
+    convert crop3.png -gravity NorthWest -background white -extent ${width_logo_area}x${height_logo_area} crop4.png
+
+    # if the caption is shorter than the logo, insert the caption at the center of the caption area instead of at the top center:
+    if [[ $height_caption_area -lt $height_logo_resized ]]; then
+      convert crop1.png -gravity Center -background white -extent ${width_caption_area}x${height_logo_area} crop2.png
+    fi
+
+    # 5) Merge the caption area and the logo area together:
+    montage crop2.png crop4.png -tile 2x1 -geometry +0+0 crop5.png
+
+    # 6) Add a white horizontal strip over the caption+logo area:
+    convert -size x20 xc:white crop6.png
+    montage crop6.png crop5.png -tile 1x2 -geometry +0+0 crop7.png
+
+    # 7) Add a white horizontal strip below the caption+logo area:
+    if [[ $height_caption_area -ge $height_logo_resized ]]; then
+      convert -size x20 xc:white crop8.png
+      montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+    else
+      convert -size x5 xc:white crop8.png
+      montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+    fi
+
+    # 8) Add the figure:
+    montage crop.png crop9.png -tile 1x2 -geometry +0+0 crop10.png
+
+  else
+    #---------------------------------------------------------
+    # In case there is no caption and no logo (only the title, if any)
+    #---------------------------------------------------------
+    cp crop.png crop10.png
+
+  fi # end if on LOGO
+
+fi # end if on CAPTION
+
+
+#------------------------------------------
+# Cut old title
+#------------------------------------------
+
+if ( $cut_title ); then
+  # cut a strip of the desired height from the top of the image to remove the old title:
+  convert crop10.png -crop +0+${cut_title_pixels} +repage crop10.png
+fi
+
+#------------------------------------------
+# Add title
+#------------------------------------------
+
+if [[ -v TITLE ]]
+then
+  # add the new title:
+  titsize_rescaled=$(( $titsize * $width_figure / 600 ))
+  convert crop10.png -background white -pointsize $titsize_rescaled label:"$TITLE" +swap -gravity Center -append crop11.png
+  cp crop11.png crop10.png
+fi
+
+
+cp crop10.png $OUTFILE
+
+# remove all temporary files:
+rm crop*.*
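+
+# Example usage (hypothetical file names):
+#   ./logo_llorens_v3.sh -l -t "Wind speed skill" -c "ECMWF S4, 1981-2015" map.ps map_final.png
+# converts map.ps to png, adds the title on top and the caption plus the BSC logo below,
+# and writes the result to map_final.png.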
diff --git a/bash/old/check_psl.out b/bash/old/check_psl.out
new file mode 100644
index 0000000000000000000000000000000000000000..0f3a0a602a2695fbb2e15ecf88cad3df17d1bc97
--- /dev/null
+++ b/bash/old/check_psl.out
@@ -0,0 +1,455 @@
+psl_19810101.nc : has 21 leadtimes instead of 216
+psl_19810201.nc : has 51 members instead of 15
+psl_19810301.nc : ok
+psl_19810401.nc : ok
+psl_19810501.nc : there is no variable called number inside this file
+psl_19810501.nc : has 49 leadtimes instead of 216
+psl_19810601.nc : ok
+psl_19810701.nc : ok
+psl_19810801.nc : has 51 members instead of 15
+psl_19810901.nc : ok
+psl_19811001.nc : ok
+psl_19811101.nc : there is no variable called number inside this file
+psl_19811201.nc : ok
+psl_19820101.nc : ok
+psl_19820201.nc : has 51 members instead of 15
+psl_19820301.nc : ok
+psl_19820401.nc : ok
+psl_19820501.nc : there is no variable called number inside this file
+psl_19820601.nc : ok
+psl_19820701.nc : ok
+psl_19820801.nc : has 51 members instead of 15
+psl_19820901.nc : ok
+psl_19821001.nc : ok
+psl_19821101.nc : there is no variable called number inside this file
+psl_19821201.nc : there is no variable called number inside this file
+psl_19830101.nc : ok
+psl_19830201.nc : >>> missing file <<<
+psl_19830301.nc : ok
+psl_19830401.nc : ok
+psl_19830501.nc : there is no variable called number inside this file
+psl_19830501.nc : has 176 leadtimes instead of 216
+psl_19830601.nc : ok
+psl_19830701.nc : ok
+psl_19830801.nc : has 51 members instead of 15
+psl_19830901.nc : ok
+psl_19831001.nc : ok
+psl_19831101.nc : there is no variable called number inside this file
+psl_19831201.nc : >>> missing file <<<
+psl_19840101.nc : ok
+psl_19840201.nc : has 51 members instead of 15
+psl_19840301.nc : ok
+psl_19840401.nc : ok
+psl_19840501.nc : there is no variable called number inside this file
+psl_19840601.nc : ok
+psl_19840701.nc : ok
+psl_19840801.nc : has 51 members instead of 15
+psl_19840901.nc : ok
+psl_19841001.nc : ok
+psl_19841101.nc : there is no variable called number inside this file
+psl_19841201.nc : there is no variable called number inside this file
+psl_19850101.nc : ok
+psl_19850201.nc : has 51 members instead of 15
+psl_19850301.nc : ok
+psl_19850401.nc : ok
+psl_19850501.nc : there is no variable called number inside this file
+psl_19850501.nc : has 861 leadtimes instead of 216
+psl_19850601.nc : ok
+psl_19850701.nc : ok
+psl_19850801.nc : has 51 members instead of 15
+psl_19850901.nc : ok
+psl_19851001.nc : ok
+psl_19851101.nc : there is no variable called number inside this
file +psl_19851201.nc : ok +psl_19860101.nc : ok +psl_19860201.nc : has 51 members instead of 15 +psl_19860301.nc : ok +psl_19860401.nc : ok +psl_19860501.nc : there is no variable called number inside this file +psl_19860601.nc : ok +psl_19860701.nc : ok +psl_19860801.nc : has 51 members instead of 15 +psl_19860901.nc : ok +psl_19861001.nc : ok +psl_19861101.nc : has 51 members instead of 15 +psl_19861201.nc : ok +psl_19870101.nc : ok +psl_19870201.nc : has 51 members instead of 15 +psl_19870301.nc : ok +psl_19870401.nc : ok +psl_19870501.nc : there is no variable called number inside this file +psl_19870601.nc : ok +psl_19870701.nc : ok +psl_19870801.nc : has 51 members instead of 15 +psl_19870901.nc : ok +psl_19871001.nc : ok +psl_19871101.nc : has 51 members instead of 15 +psl_19871201.nc : ok +psl_19880101.nc : ok +psl_19880201.nc : has 51 members instead of 15 +psl_19880301.nc : ok +psl_19880401.nc : ok +psl_19880501.nc : there is no variable called number inside this file +psl_19880501.nc : has 198 leadtimes instead of 216 +psl_19880601.nc : ok +psl_19880701.nc : ok +psl_19880801.nc : has 51 members instead of 15 +psl_19880901.nc : ok +psl_19881001.nc : ok +psl_19881101.nc : there is no variable called number inside this file +psl_19881201.nc : ok +psl_19890101.nc : ok +psl_19890201.nc : has 51 members instead of 15 +psl_19890301.nc : ok +psl_19890401.nc : ok +psl_19890501.nc : there is no variable called number inside this file +psl_19890601.nc : ok +psl_19890701.nc : ok +psl_19890801.nc : has 51 members instead of 15 +psl_19890901.nc : ok +psl_19891001.nc : ok +psl_19891101.nc : there is no variable called number inside this file +psl_19891201.nc : ok +psl_19900101.nc : ok +psl_19900201.nc : has 51 members instead of 15 +psl_19900301.nc : ok +psl_19900401.nc : ok +psl_19900501.nc : there is no variable called number inside this file +psl_19900501.nc : has 4 leadtimes instead of 216 +psl_19900601.nc : ok +psl_19900701.nc : ok +psl_19900801.nc : has 51 members instead of 15 +psl_19900901.nc : ok +psl_19901001.nc : ok +psl_19901101.nc : there is no variable called number inside this file +psl_19901201.nc : ok +psl_19910101.nc : ok +psl_19910201.nc : has 51 members instead of 15 +psl_19910301.nc : ok +psl_19910401.nc : ok +psl_19910501.nc : there is no variable called number inside this file +psl_19910601.nc : there is no variable called latitude inside this file +psl_19910601.nc : there is no variable called longitude inside this file +psl_19910601.nc : there is no variable called number inside this file +psl_19910601.nc : has 3240 leadtimes instead of 216 +psl_19910701.nc : there is no variable called latitude inside this file +psl_19910701.nc : there is no variable called longitude inside this file +psl_19910701.nc : there is no variable called number inside this file +psl_19910701.nc : has 3240 leadtimes instead of 216 +psl_19910801.nc : there is no variable called latitude inside this file +psl_19910801.nc : there is no variable called longitude inside this file +psl_19910801.nc : there is no variable called number inside this file +psl_19910801.nc : has 9628 leadtimes instead of 216 +psl_19910901.nc : there is no variable called latitude inside this file +psl_19910901.nc : there is no variable called longitude inside this file +psl_19910901.nc : there is no variable called number inside this file +psl_19910901.nc : has 2183 leadtimes instead of 216 +psl_19911001.nc : there is no variable called latitude inside this file +psl_19911001.nc : there is no variable called longitude 
inside this file +psl_19911001.nc : there is no variable called number inside this file +psl_19911001.nc : has 802 leadtimes instead of 216 +psl_19911101.nc : there is no variable called latitude inside this file +psl_19911101.nc : there is no variable called longitude inside this file +psl_19911101.nc : there is no variable called number inside this file +psl_19911101.nc : has 1000 leadtimes instead of 216 +psl_19911201.nc : there is no variable called latitude inside this file +psl_19911201.nc : there is no variable called longitude inside this file +psl_19911201.nc : there is no variable called number inside this file +psl_19911201.nc : has 951 leadtimes instead of 216 +psl_19920101.nc : ok +psl_19920201.nc : there is no variable called latitude inside this file +psl_19920201.nc : there is no variable called longitude inside this file +psl_19920201.nc : there is no variable called number inside this file +psl_19920201.nc : has 748 leadtimes instead of 216 +psl_19920301.nc : there is no variable called latitude inside this file +psl_19920301.nc : there is no variable called longitude inside this file +psl_19920301.nc : there is no variable called number inside this file +psl_19920301.nc : has 1693 leadtimes instead of 216 +psl_19920401.nc : there is no variable called latitude inside this file +psl_19920401.nc : there is no variable called longitude inside this file +psl_19920401.nc : there is no variable called number inside this file +psl_19920401.nc : has 705 leadtimes instead of 216 +psl_19920501.nc : there is no variable called number inside this file +psl_19920601.nc : ok +psl_19920701.nc : ok +psl_19920801.nc : has 51 members instead of 15 +psl_19920901.nc : ok +psl_19921001.nc : ok +psl_19921101.nc : there is no variable called number inside this file +psl_19921101.nc : has 56 leadtimes instead of 216 +psl_19921201.nc : ok +psl_19930101.nc : ok +psl_19930201.nc : has 51 members instead of 15 +psl_19930301.nc : ok +psl_19930401.nc : ok +psl_19930501.nc : there is no variable called number inside this file +psl_19930501.nc : has 2 leadtimes instead of 216 +psl_19930601.nc : ok +psl_19930701.nc : ok +psl_19930801.nc : has 51 members instead of 15 +psl_19930901.nc : ok +psl_19931001.nc : ok +psl_19931101.nc : there is no variable called number inside this file +psl_19931101.nc : has 65 leadtimes instead of 216 +psl_19931201.nc : ok +psl_19940101.nc : ok +psl_19940201.nc : has 51 members instead of 15 +psl_19940301.nc : ok +psl_19940401.nc : ok +psl_19940501.nc : there is no variable called number inside this file +psl_19940601.nc : ok +psl_19940701.nc : ok +psl_19940801.nc : has 51 members instead of 15 +psl_19940901.nc : has 130 leadtimes instead of 216 +psl_19941001.nc : ok +psl_19941101.nc : there is no variable called number inside this file +psl_19941101.nc : has 51 leadtimes instead of 216 +psl_19941201.nc : ok +psl_19950101.nc : ok +psl_19950201.nc : has 51 members instead of 15 +psl_19950301.nc : ok +psl_19950401.nc : ok +psl_19950501.nc : there is no variable called number inside this file +psl_19950601.nc : ok +psl_19950701.nc : ok +psl_19950801.nc : has 51 members instead of 15 +psl_19950901.nc : ok +psl_19951001.nc : ok +psl_19951101.nc : there is no variable called number inside this file +psl_19951201.nc : ok +psl_19960101.nc : ok +psl_19960201.nc : has 51 members instead of 15 +psl_19960301.nc : ok +psl_19960401.nc : ok +psl_19960501.nc : there is no variable called number inside this file +psl_19960601.nc : ok +psl_19960701.nc : ok +psl_19960801.nc : has 51 members 
instead of 15 +psl_19960901.nc : ok +psl_19961001.nc : there is no variable called number inside this file +psl_19961001.nc : has 109 leadtimes instead of 216 +psl_19961101.nc : there is no variable called number inside this file +psl_19961201.nc : ok +psl_19970101.nc : ok +psl_19970201.nc : has 51 members instead of 15 +psl_19970301.nc : ok +psl_19970401.nc : ok +psl_19970501.nc : has 51 members instead of 15 +psl_19970501.nc : has 180 leadtimes instead of 216 +psl_19970601.nc : ok +psl_19970701.nc : ok +psl_19970801.nc : has 51 members instead of 15 +psl_19970901.nc : ok +psl_19971001.nc : ok +psl_19971101.nc : there is no variable called number inside this file +psl_19971101.nc : has 49 leadtimes instead of 216 +psl_19971201.nc : has 142 leadtimes instead of 216 +psl_19980101.nc : ok +psl_19980201.nc : has 51 members instead of 15 +psl_19980301.nc : ok +psl_19980401.nc : ok +psl_19980501.nc : there is no variable called number inside this file +psl_19980501.nc : has 44 leadtimes instead of 216 +psl_19980601.nc : ok +psl_19980701.nc : ok +psl_19980801.nc : has 51 members instead of 15 +psl_19980901.nc : ok +psl_19981001.nc : ok +psl_19981101.nc : there is no variable called number inside this file +psl_19981201.nc : ok +psl_19990101.nc : ok +psl_19990201.nc : has 51 members instead of 15 +psl_19990301.nc : ok +psl_19990401.nc : ok +psl_19990501.nc : there is no variable called number inside this file +psl_19990501.nc : has 10 leadtimes instead of 216 +psl_19990601.nc : ok +psl_19990701.nc : ok +psl_19990801.nc : has 51 members instead of 15 +psl_19990901.nc : ok +psl_19991001.nc : ok +psl_19991101.nc : there is no variable called number inside this file +psl_19991201.nc : ok +psl_20000101.nc : ok +psl_20000201.nc : has 51 members instead of 15 +psl_20000301.nc : ok +psl_20000401.nc : ok +psl_20000501.nc : there is no variable called number inside this file +psl_20000501.nc : has 105 leadtimes instead of 216 +psl_20000601.nc : has 56 leadtimes instead of 216 +psl_20000701.nc : ok +psl_20000801.nc : has 51 members instead of 15 +psl_20000901.nc : ok +psl_20001001.nc : ok +psl_20001101.nc : there is no variable called number inside this file +psl_20001201.nc : ok +psl_20010101.nc : ok +psl_20010201.nc : has 51 members instead of 15 +psl_20010301.nc : ok +psl_20010401.nc : ok +psl_20010501.nc : there is no variable called number inside this file +psl_20010601.nc : ok +psl_20010701.nc : ok +psl_20010801.nc : has 51 members instead of 15 +psl_20010901.nc : ok +psl_20011001.nc : ok +psl_20011101.nc : there is no variable called number inside this file +psl_20011101.nc : has 10 leadtimes instead of 216 +psl_20011201.nc : ok +psl_20020101.nc : ok +psl_20020201.nc : has 51 members instead of 15 +psl_20020301.nc : ok +psl_20020401.nc : ok +psl_20020501.nc : there is no variable called number inside this file +psl_20020501.nc : has 153 leadtimes instead of 216 +psl_20020601.nc : ok +psl_20020701.nc : ok +psl_20020801.nc : has 51 members instead of 15 +psl_20020901.nc : ok +psl_20021001.nc : ok +psl_20021101.nc : there is no variable called number inside this file +psl_20021101.nc : has 163 leadtimes instead of 216 +psl_20021201.nc : ok +psl_20030101.nc : ok +psl_20030201.nc : has 51 members instead of 15 +psl_20030301.nc : ok +psl_20030401.nc : ok +psl_20030501.nc : there is no variable called number inside this file +psl_20030601.nc : ok +psl_20030701.nc : ok +psl_20030801.nc : has 51 members instead of 15 +psl_20030901.nc : ok +psl_20031001.nc : ok +psl_20031101.nc : there is no variable called 
number inside this file +psl_20031201.nc : ok +psl_20040101.nc : ok +psl_20040201.nc : has 51 members instead of 15 +psl_20040301.nc : ok +psl_20040401.nc : ok +psl_20040501.nc : there is no variable called number inside this file +psl_20040501.nc : has 7 leadtimes instead of 216 +psl_20040601.nc : has 28 leadtimes instead of 216 +psl_20040701.nc : ok +psl_20040801.nc : has 51 members instead of 15 +psl_20040901.nc : ok +psl_20041001.nc : ok +psl_20041101.nc : there is no variable called number inside this file +psl_20041201.nc : ok +psl_20050101.nc : ok +psl_20050201.nc : has 51 members instead of 15 +psl_20050301.nc : ok +psl_20050401.nc : ok +psl_20050501.nc : there is no variable called number inside this file +psl_20050601.nc : has 186 leadtimes instead of 216 +psl_20050701.nc : ok +psl_20050801.nc : has 51 members instead of 15 +psl_20050901.nc : ok +psl_20051001.nc : has 31 leadtimes instead of 216 +psl_20051101.nc : there is no variable called number inside this file +psl_20051101.nc : has 23 leadtimes instead of 216 +psl_20051201.nc : ok +psl_20060101.nc : ok +psl_20060201.nc : has 51 members instead of 15 +psl_20060301.nc : ok +psl_20060401.nc : has 40 leadtimes instead of 216 +psl_20060501.nc : there is no variable called number inside this file +psl_20060601.nc : ok +psl_20060701.nc : ok +psl_20060801.nc : has 51 members instead of 15 +psl_20060901.nc : ok +psl_20061001.nc : ok +psl_20061101.nc : there is no variable called number inside this file +psl_20061201.nc : ok +psl_20070101.nc : ok +psl_20070201.nc : has 51 members instead of 15 +psl_20070301.nc : ok +psl_20070401.nc : ok +psl_20070501.nc : there is no variable called number inside this file +psl_20070601.nc : ok +psl_20070701.nc : ok +psl_20070801.nc : has 51 members instead of 15 +psl_20070901.nc : ok +psl_20071001.nc : ok +psl_20071101.nc : there is no variable called number inside this file +psl_20071201.nc : ok +psl_20080101.nc : ok +psl_20080201.nc : has 51 members instead of 15 +psl_20080301.nc : ok +psl_20080401.nc : ok +psl_20080501.nc : there is no variable called number inside this file +psl_20080601.nc : ok +psl_20080701.nc : has 166 leadtimes instead of 216 +psl_20080801.nc : has 51 members instead of 15 +psl_20080901.nc : ok +psl_20081001.nc : ok +psl_20081101.nc : there is no variable called number inside this file +psl_20081201.nc : ok +psl_20090101.nc : ok +psl_20090201.nc : has 51 members instead of 15 +psl_20090301.nc : has 46 leadtimes instead of 216 +psl_20090401.nc : has 3 leadtimes instead of 216 +psl_20090501.nc : there is no variable called number inside this file +psl_20090601.nc : ok +psl_20090701.nc : ok +psl_20090801.nc : has 51 members instead of 15 +psl_20090901.nc : ok +psl_20091001.nc : ok +psl_20091101.nc : there is no variable called number inside this file +psl_20091201.nc : ok +psl_20100101.nc : ok +psl_20100201.nc : has 51 members instead of 15 +psl_20100301.nc : ok +psl_20100401.nc : ok +psl_20100501.nc : there is no variable called number inside this file +psl_20100601.nc : has 86 leadtimes instead of 216 +psl_20100701.nc : ok +psl_20100801.nc : has 51 members instead of 15 +psl_20100901.nc : ok +psl_20101001.nc : ok +psl_20101101.nc : there is no variable called number inside this file +psl_20101201.nc : there is no variable called number inside this file +psl_20110101.nc : ok +psl_20110201.nc : has 51 members instead of 15 +psl_20110301.nc : ok +psl_20110401.nc : ok +psl_20110501.nc : there is no variable called number inside this file +psl_20110601.nc : has 51 members instead 
of 15 +psl_20110601.nc : has 46 leadtimes instead of 216 +psl_20110701.nc : has 51 members instead of 15 +psl_20110701.nc : has 100 leadtimes instead of 216 +psl_20110801.nc : has 51 members instead of 15 +psl_20110801.nc : has 214 leadtimes instead of 216 +psl_20110901.nc : has 51 members instead of 15 +psl_20111001.nc : has 51 members instead of 15 +psl_20111101.nc : there is no variable called number inside this file +psl_20111201.nc : has 51 members instead of 15 +psl_20120101.nc : has 51 members instead of 15 +psl_20120201.nc : has 51 members instead of 15 +psl_20120201.nc : has 165 leadtimes instead of 216 +psl_20120301.nc : has 51 members instead of 15 +psl_20120401.nc : has 51 members instead of 15 +psl_20120501.nc : there is no variable called number inside this file +psl_20120601.nc : has 51 members instead of 15 +psl_20120601.nc : has 125 leadtimes instead of 216 +psl_20120701.nc : has 51 members instead of 15 +psl_20120801.nc : has 51 members instead of 15 +psl_20120901.nc : has 51 members instead of 15 +psl_20121001.nc : has 51 members instead of 15 +psl_20121001.nc : has 213 leadtimes instead of 216 +psl_20121101.nc : there is no variable called number inside this file +psl_20121201.nc : has 51 members instead of 15 +psl_20130101.nc : has 51 members instead of 15 +psl_20130201.nc : has 51 members instead of 15 +psl_20130301.nc : has 51 members instead of 15 +psl_20130301.nc : has 142 leadtimes instead of 216 +psl_20130401.nc : has 51 members instead of 15 +psl_20130501.nc : there is no variable called number inside this file +psl_20130601.nc : has 51 members instead of 15 +psl_20130601.nc : has 205 leadtimes instead of 216 +psl_20130701.nc : has 51 members instead of 15 +psl_20130801.nc : has 51 members instead of 15 +psl_20130901.nc : has 51 members instead of 15 +psl_20131001.nc : has 51 members instead of 15 +psl_20131101.nc : there is no variable called number inside this file +psl_20131101.nc : has 2 leadtimes instead of 216 +psl_20131201.nc : has 51 members instead of 15 diff --git a/bash/old/check_sfcWind.out b/bash/old/check_sfcWind.out new file mode 100644 index 0000000000000000000000000000000000000000..3093a1599af7fc1969002917f7831cd3e2ff29b2 --- /dev/null +++ b/bash/old/check_sfcWind.out @@ -0,0 +1,399 @@ +sfcWind_19810101.nc : >>> missing file <<< +sfcWind_19810201.nc : >>> missing file <<< +sfcWind_19810301.nc : >>> missing file <<< +sfcWind_19810401.nc : >>> missing file <<< +sfcWind_19810501.nc : there is no variable called time inside this file +sfcWind_19810601.nc : >>> missing file <<< +sfcWind_19810701.nc : >>> missing file <<< +sfcWind_19810801.nc : there is no variable called latitude inside this file +sfcWind_19810801.nc : there is no variable called longitude inside this file +sfcWind_19810801.nc : there is no variable called lev inside this file +sfcWind_19810801.nc : has 1 leadtimes instead of 214 +sfcWind_19810901.nc : >>> missing file <<< +sfcWind_19811001.nc : >>> missing file <<< +sfcWind_19811101.nc : ok +sfcWind_19811201.nc : >>> missing file <<< +sfcWind_19820101.nc : >>> missing file <<< +sfcWind_19820201.nc : >>> missing file <<< +sfcWind_19820301.nc : >>> missing file <<< +sfcWind_19820401.nc : >>> missing file <<< +sfcWind_19820501.nc : >>> missing file <<< +sfcWind_19820601.nc : >>> missing file <<< +sfcWind_19820701.nc : >>> missing file <<< +sfcWind_19820801.nc : >>> missing file <<< +sfcWind_19820901.nc : >>> missing file <<< +sfcWind_19821001.nc : >>> missing file <<< +sfcWind_19821101.nc : ok +sfcWind_19821201.nc : >>> missing file 
<<<
+[log condensed: every monthly start date from sfcWind_19830101.nc to sfcWind_20131201.nc is reported as ">>> missing file <<<", except the November start dates (sfcWind_YYYY1101.nc), which are reported as "ok"; the only other exception is sfcWind_20131101.nc, reported as "has 212 leadtimes instead of 214".]
diff --git a/bash/old/checking_S4.sh b/bash/old/checking_S4.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3dfe39b539c6eacfa9957a9b8e669bc265c9d10d
--- /dev/null
+++ b/bash/old/checking_S4.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+
+var=psl #sfcWind # name of the variable to check
+path=/esnas/exp/ecmwf/system4_m1/daily_mean/${var}_f6h/ # its path
+suffix=01 # this suffix is usually present in all S4 netCDF files, right before the file extension
+ext=.nc # file extension
+
+yearStart=1981 # time period to check inside the files
+yearEnd=2015
+
+nameLatitude=latitude # name of the latitude dimension to check in the file
+nLatitude=181 #256 #181 # number of latitude values to check
+
+nameLongitude=longitude # name of the longitude dimension to check in the file
+nLongitude=360 #512 #360 # number of longitude values to check
+
+nameMembers=ensemble #lev #number # name of the dimension with the model members to check in the file
+nMembers=15 #51 #15 # number of members to check in the file
+
+nameLeadtimes=time # name of the dimension with the forecast times to check in the file
+nLeadtimes=216 #214 #216 # number of forecast times to check in the file
+
+# function that detects the size of a dimension in a netCDF file:
+#
+# usage: get_size myNetCDF.nc myDimensionName
+#
+function get_size {
+  ncks -m ${1} | grep -E -i ": ${2}, size =" | cut -d ' ' -f 7 | uniq
+}
+
+# check file existence and dimension sizes:
+for ((i=$yearStart;i<=$yearEnd;i++))
+do
+for j in 01 02 03 04 05 06 07 08 09 10 11 12
+do
+  ok1=0; ok2=0; ok3=0; ok4=0
+
+  #echo $path${var}_$i$j$suffix$ext
+  if [ ! -f $path${var}_$i$j$suffix$ext ]
+  then
+    echo ${var}_$i$j$suffix$ext ": >>>>> Missing file <<<<<"
+  else
+    nMemb=`get_size $path${var}_$i$j$suffix$ext $nameMembers`
+    nLead=`get_size $path${var}_$i$j$suffix$ext $nameLeadtimes`
+    nLat=`get_size $path${var}_$i$j$suffix$ext $nameLatitude`
+    nLon=`get_size $path${var}_$i$j$suffix$ext $nameLongitude`
+
+    if [ -z "$nLat" ]; then # check if nLat is empty (quoted so the test is safe with an empty value)
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLatitude " inside this file "
+    elif [ "${nLat//[[:blank:]]/}" -ne $nLatitude ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameLatitude " dimension has " $nLat " values instead of " $nLatitude
+    else
+      ok1=1
+    fi
+
+    if [ -z "$nLon" ]; then # check if nLon is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLongitude " inside this file "
+    elif [ "${nLon//[[:blank:]]/}" -ne $nLongitude ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameLongitude " dimension has " $nLon " values instead of " $nLongitude
+    else
+      ok2=1
+    fi
+
+    if [ -z "$nMemb" ]; then # check if nMemb is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameMembers " inside this file "
+    elif [ "${nMemb//[[:blank:]]/}" -ne $nMembers ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameMembers " dimension has " $nMemb " values instead of " $nMembers
+    else
+      ok3=1
+    fi
+
+    if [ -z "$nLead" ]; then # check if nLead is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLeadtimes " inside this file "
+    elif [ "${nLead//[[:blank:]]/}" -ne $nLeadtimes ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameLeadtimes " dimension has " $nLead " values instead of " $nLeadtimes
+    else
+      ok4=1
+    fi
+
+    if [ $ok1 -eq 1 ] && [ $ok2 -eq 1 ] && [ $ok3 -eq 1 ] && [ $ok4 -eq 1 ]; then
+      echo ${var}_$i$j$suffix$ext ": ok"
+    fi
+  fi
+done
+done
+
+
+# # other common changes to netCDF:
+
+# for file in *; ncrename -d .
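+# example (editor's note, not in the original script): get_size, defined above, can also be
+# run standalone to inspect a single file; with the current settings this should print 181,
+# the expected number of latitude values:
+#
+#   get_size ${path}${var}_198101${suffix}${ext} $nameLatitude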
+# # convert a 6-hourly file to a daily one:
+# cdo daymean
+
+# cdo showdate
+
+# convert a .nc file of ECMWF S4 after downloading:
+#file=_grib2netcdf-atls01-95e2cf....nc ; ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file ; date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) ; mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc ; cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+# the same pipeline, step by step:
+#file=_grib2netcdf-atls01-95e2cf....nc
+#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file
+#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g)
+#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc
+#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc
+#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+# psl_19810101.nc
+# psl_19810501.nc
+# psl_19830501.nc
+# psl_19850501.nc
+# psl_19880501.nc
+# psl_19900501.nc
+# psl_19910601.nc
+# psl_19910701.nc
+# psl_19910801.nc
+# psl_19910901.nc
+# psl_19911001.nc
+# psl_19911101.nc
+# psl_19911201.nc
+# psl_19920201.nc
+# psl_19920301.nc
+# psl_19920401.nc
+# psl_19921101.nc
+# psl_19930501.nc
+# psl_19931101.nc
+# psl_19940901.nc
+# psl_19941101.nc
+# psl_19961001.nc
+# psl_19970501.nc
+# psl_19971101.nc
+# psl_19971201.nc
+# psl_19980501.nc
+# psl_19990501.nc
+# psl_20000501.nc
+# psl_20000601.nc
+# psl_20011101.nc
+# psl_20020501.nc
+# psl_20021101.nc
+# psl_20050601.nc
+# psl_20051001.nc
+# psl_20051101.nc
+
diff --git a/dem/elevation_512x256.nc b/dem/elevation_512x256.nc
new file mode 100755
index 0000000000000000000000000000000000000000..b8502be666df754544c463a4587f53adf178d0ab
Binary files /dev/null and b/dem/elevation_512x256.nc differ
diff --git a/doo_young/PlotRD.dy.MME4.R b/doo_young/PlotRD.dy.MME4.R
new file mode 100644
index 0000000000000000000000000000000000000000..e883e4755b0787b1bda920e96a2745678815ae91
--- /dev/null
+++ b/doo_young/PlotRD.dy.MME4.R
@@ -0,0 +1,153 @@
+PlotRD<-function(rel_diag,nbins=10,consbars=F,tit=NULL,colLine=NULL,colBar=NULL,marHist=T,hist_ylim=NULL,Lg=NULL) {
+
+print("Plot")
+# values used while debugging:
+# rel_diag<-rd
+# nbins=10
+# consbars=T
+# colLine=col_line
+# colBar=col_bar
+# tit=tit1
+# marHist=T
+# hist_ylim=c(0,100)
+# x11(width=12,height=10)
+  # x11()
+  # PLOT OF THE RELIABILITY DIAGRAM
+  #
+  ######################################################################################
+  # rel_diag: a list with the reliability diagrams that will be drawn in the same plot
+  # consbars: whether the consistency bars are drawn.
+  # nbins : number of equidistant points used to compute the reliability diagram (optional)
+  # tit: the title of the plot (optional)
+  # brierScores: the Brier score linked to the reliability diagram (optional; not an argument of this version)
+  # marHist: whether to show the small refinement (sharpness) histograms
+  # hist_ylim: y-axis range of the refinement histograms (optional)
+  # Lg: list of legend labels, one per reliability diagram (optional)
+  #####################################################################################
+
+  # Some parameters are defined
+  nrd<-length(rel_diag) # nrd = 5: 4 models + mme
+  rg<-list()
+  # Check the number of bins of each reliability diagram
+  for (i in 1:nrd){
+    if (dim(rel_diag[[i]])[1]!=nbins){
+      stop('The number of bins of the reliability diagram must equal nbins')
+    }
+    rg[[i]]<-range(rel_diag[[i]]$hist.counts) # check the range of the histograms
+  }
+  if (is.null(hist_ylim)){
+    rgH<-range(rg)
+#print(rgH)
+  }else{
+    rgH<-hist_ylim
+  }
+
+
+  ##########################################
+  # reliability plot
+  # par(mar=c(5,3,2,2)+0.1)
+  ##########################################
+
+  layout(matrix(c(rep(1,nrd),seq(2,(nrd+1))),nrd,2,byrow=F),widths=c(5,2))
+  par(oma=c(2.5,4,5,1))
+  #layout.show(a)
+
+  # The axes are defined
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  #x11(width=12,height=10)
+# old.par <- par(no.readonly = TRUE)
+#print(old.par)
+# on.exit(par(old.par))
+# par(mar=c(5,5,5,0))
+  old.par <- par(mar=c(5,5,5,0))
+  on.exit(par(old.par))
+
+  plot(NULL, xlim = c(0,1), ylim = c(0,1),axes=F, xlab='', ylab='')
+
+  axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=2.0)
+  title(xlab= "Forecast probability",line=3.9,cex.lab=2.0)
+
+  axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), las=2,cex.axis=2.0)
+  #axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), cex.axis=2.0)
+  box()
+  title(ylab= "Observed relative frequency", line=0.2,cex.lab=2.0,outer=T)
+  if(!is.null(tit)){
+# title(tit,cex.main=4,outer=T,line=-1)
+# title(tit,cex.main=2.0,outer=T,line=-4)
+    title(tit,cex.main=2.0,outer=T,line=-3)
+  }
+
+  # Legend
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  yloc <- c(1.0, 0.95, 0.90, 0.85, 0.80)
+  legend(0.,yloc[1], legend=Lg[[1]], fill=colLine[[1]], bty="n", cex=1.2)
+  legend(0.,yloc[2], legend=Lg[[2]], fill=colLine[[2]], bty="n", cex=1.2)
+  legend(0.,yloc[3], legend=Lg[[3]], fill=colLine[[3]], bty="n", cex=1.2)
+  legend(0.,yloc[4], legend=Lg[[4]], fill=colLine[[4]], bty="n", cex=1.2)
+  legend(0.,yloc[5], legend=Lg[[5]], fill=colLine[[5]], bty="n", cex=1.2)
+# legend("topleft", "(x,y)", pch = 1, title = "topleft, inset = .05", inset = .05)
+
+  # No-resolution and no-skill lines
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  no_res <- sum(rel_diag[[1]]$obs.counts)/sum(rel_diag[[1]]$hist.counts)
+# vt_res <- sum(rel_diag[[1]]$for.prob)/sum(rel_diag[[1]]$hist.counts)
+#print(paste("no_res = ",no_res))
+  numb <- c(seq(0,1,by=0.1))
+#print(numb)
+  no_skill <- (numb+no_res)/2.
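+# note (editor's comment): no_res is the climatological frequency of the event, so the
+# no-skill line y = (x + no_res)/2 lies halfway between the horizontal no-resolution line
+# y = no_res and the diagonal y = x; bins whose points fall between the no-skill line and
+# the diagonal contribute positively to the Brier skill score.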
+#print(no_skill)
+
+# diagonal line
+  lines(c(0,1), c(0,1), lty=1)
+# no-resolution line
+  lines(c(0,1), c(no_res,no_res), col="gray", lty=3)
+  lines(c(1/3,1/3), c(0,1), col="gray", lty=3) # vertical line at p = 1/3 (presumably the climatological probability of a tercile category)
+# lines(c(vt_res,vt_res), c(0,1), col="gray", lty=3)
+# lines(c(no_res,no_res), c(0,1), col="gray", lty=3)
+# no-skill line
+  lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3)
+
+
+  # Consistency bars
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  HI<-matrix(NA,nrow=nrd,ncol=length(rel_diag[[1]]$hist.counts))
+
+  for (j in 1:nrd){ # nrd = 5: 4 models + mme
+    HI[j,]<-rel_diag[[j]]$hist.counts
+    if (consbars==T){
+      # the lower and upper limits of each consistency bar are combined into one array
+      consBars<-list()
+      consBars[[j]]<-abind(InsertDim(rel_diag[[j]]$cbar.lo,1,1),InsertDim(rel_diag[[j]]$cbar.hi,1,1),along=1)
+
+      # plot consistency bars
+      for (i in 1:nbins){ # nbins = 10 bins
+#       lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=3)
+        lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=2) # lwd: line width
+      }
+    }
+
+# see plot: "p" for points, "l" for lines, "b" for both points and lines, "c" for empty points joined by lines, "o" for overplotted points and lines, "s" and "S" for stair steps and "h" for histogram-like vertical lines. Finally, "n" does not produce any points or lines.
+    points(rel_diag[[j]]$p.avgs, rel_diag[[j]]$cond.probs, type="b", pch=1 , col =colLine[[j]], cex=2.0 , lwd=3)
+
+  }
+
+
+  # Number of forecasts
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  if (marHist==TRUE){
+
+    for (i in 1:nrd){
+#     par(mar=c(5,0,5,12))
+#     par(mar=c(1.5,1.5,6,7)) # when 3 or 4 sharpness diagrams are drawn
+      par(mar=c(0.5,1.5,5,7)) # when 5 sharpness diagrams are drawn, as here
+      barplot(HI[i,]/10000, beside=T,space=c(0,1.2),axes = F, axis.lty=F, axisnames = F, col = colLine[[i]], ylim=rgH/10000)
+#     axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=1.5)
+      title(main = "# of forecasts (x10⁴)", font.main = 1.0, line=0.5)
+#     grid(1,5,col='#525252')
+      axis(4,cex.axis=1.0)
+      box(bg='grey')
+    }
+    #pp<- par("plt")
+    #par("plt" = c(pp[2] - 0.14 , pp[2], pp[3], pp[3]+ 0.15) )
+    #par(new = TRUE)
+  }
+
+}
diff --git a/doo_young/reliability_diagrams.dy_4models.fig.v2.MME4.R b/doo_young/reliability_diagrams.dy_4models.fig.v2.MME4.R
new file mode 100644
index 0000000000000000000000000000000000000000..a439a8784ebfa2549cc3dab8cbd78135300db02a
--- /dev/null
+++ b/doo_young/reliability_diagrams.dy_4models.fig.v2.MME4.R
@@ -0,0 +1,158 @@
+#clear workspace
+rm(list=ls())
+gc()
+#________________________________________________________________________________________________________
+#
+# Reliability Diagram
+#________________________________________________________________________________________________________
+
+#load sources and libraries
+library(s2dverification)
+library(SpecsVerification)
+library(ncdf)
+library(statmod)
+library(maps)
+library(mapdata)
+library(parallel)
+library(doMC)
+library(psych)
+library(RColorBrewer)
+library(MASS)
+library(maptools) #for shapefiles
+library(scales) #for transparency
+library(abind)
+library(TeachingDemos)
+#source('~/R/scripts/functions_s2dverification/PlotEquiMapV.R')
+source('../R/ReliabilityDiagramHist.R')
+source('../R/PlotRD.dy.MME4.R')
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Select parameters
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+variable<-'sfcwind' # variable
+var_name<-'10m Wind Speed'
+#variable<-'tas' # variable
+#var_name<-'2m Temperature'
+s1<-1991 # initial start date
+s2<-2013 # final start date
+s3<-s2-1 # final start date of the hindcast
+sea<-1 # index of the season (in targ); overridden below
+# Lead times
+ltmin<-2
+ltmax<-4
+# Reanalysis
+nrls <- 1
+ranl <- list('wERAint','wJRA55') # reanalysis tags used in the input/output paths
+rinp <- list('ERA_Int','JRA_55') # reanalysis names used in the file names
+
+# Season: 1:DJF, 2:JJA, 3:MAM, 4:SON
+sea<-4
+targ <- list('DJF','JJA','MAM','SON') # targ: forecast season
+fsti <- list('November','May','February','August') # fcst: start month of the forecast
+
+# Regions (areas are c(lon.min, lon.max, lat.min, lat.max)):
+nrg <- 10
+rgn <- list('GL','NH','SH','NA','SA','AU','EA','EU','TR','AF')
+region<-list("Globe","Northern H.","Southern H.","North Am.","South Am.","Australia","East Asia","Europe","Tropics","Africa")
+area1<-c(0,360,-90,90)
+area2<-c(0,360,20,90)
+area3<-c(0,360,-90,-20)
+area4<-c(190,310,10,75)
+area5<-c(270,330,-60,10)
+area6<-c(110,180,-50,0)
+area7<-c(90,150,20,50)
+area8<-c(-15,40,35,75)
+area9<-c(0,360,-20,20)
+area10<-c(-20,55,-35,40)
+range <- list(area1,area2,area3,area4,area5,area6,area7,area8,area9,area10) # (note: masks base::range inside this script)
+#########################################################################
+for (js in 1:nrls){ # 2 reanalyses (only the first is used while nrls is 1)
+#for (js in 1){
+print(paste("Reanalysis = ", rinp[[js]]))
+
+for (is in 1:sea){ # 4 seasons
+#for (is in 1){
+print(paste("season = ", targ[[is]]))
+
+for (ms in 1:nrg){ # 10 Regions
+#for (ms in 1){
+lonlatbox=range[[ms]]
+print(rgn[[ms]])
+#########################################################################
+# Colours of the reliability diagram lines
+#col_line<-c('#137AE1',"#feb24c","#FF0000")
+col_line<-c("#FF0000","#feb24c",'#137AE1','#01854d','#4D0185')
+
+## alpha => light(0) to dark(1)
+#col_bar<-c('#C7E7F9','#F5EAC0','#F9C7D4')
+col_bar<-c(adjustcolor(col_line[1],alpha=0.3),adjustcolor(col_line[2],alpha=0.5),adjustcolor(col_line[3],alpha=0.3),adjustcolor(col_line[4],alpha=0.3),adjustcolor(col_line[5],alpha=0.3))
+#col_bar<-c('#A2D5F7','#FACD7F','#F6A4A4')
+
+file<-list('RelDiag_rawdata','RelDiag_sbc_cross','RelDiag_sbc_nocross','RelDiag_cal_cross','RelDiag_cal_nocross')
+ Stitle<-c('Raw Data','Simple bias correction in cross-validation', 'Simple bias correction without cross-validation',
+           'Calibration in cross-validation','Calibration without cross-validation')
+####################################################################
+
+ dirfiles <-paste('/esarchive/oper/DATA/skill/Exp5/',variable,'/',ranl[[js]],'/rld/mask/',sep='')
+ outputdir<-paste('/esarchive/oper/DATA/fig/Exp5/',ranl[[js]],'/',variable,'/rld/mask_mme4/',targ[[is]],'/',sep='')
+
+ fileobs <-paste(dirfiles,rinp[[js]],'_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='')
+ filepred1<-paste(dirfiles,'ECMWF_S4_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='')
+ filepred2<-paste(dirfiles,'METFR_S3_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='')
+ filepred3<-paste(dirfiles,'METFR_S4_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='')
+ filepred4<-paste(dirfiles,'METFR_S5_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='')
+
+ # data: observations and predictions
+ obs<-get(load(fileobs))
+ pred1<-get(load(filepred1))
+ pred2<-get(load(filepred2))
+ pred3<-get(load(filepred3))
+ pred4<-get(load(filepred4))
+ predm<-pred1
+
+#######################################################
+ for (i in (1:5)){ # loop over the 5 post-processing datasets
+# for (i in 5){ # post-processing datasets
+print(paste("post-pro = ", i))
+#print(dim(obs[[i]])) # 3 100804
+#print(dim(pred[[i]])) # 3 100804
+
+#######################################################
+ for (jc in (1:3)){ # 3 categories (jc=1 ->'below', jc=2 ->'near', jc=3 ->'above')
+# for (jc in 1){ # 3 categories
+ predm[[i]][jc, ] <- (pred1[[i]][jc, ]+pred2[[i]][jc, ]+pred3[[i]][jc, ]+pred4[[i]][jc, ])/4 # multi-model mean of the four single-model probabilities
+ model <- rbind(rbind(pred1[[i]][jc,],pred2[[i]][jc,]),pred3[[i]][jc,],pred4[[i]][jc,],predm[[i]][jc,])
+print(dim(pred1)); print(dim(predm)); print(dim(model))
+
+ categ<-list('Below','Near','Above')
+ Model_Name <- list('ECMWF_S4','METFR_S3','METFR_S4','METFR_S5','MME')
+ nthr<-dim(model)[1]
+
+# fileout<-paste(outputdir,i,'_',categ[jc],'_',file[[i]],'_',variable,'_',s1,'_',s3,'_',rgn[[ms]],'_no_topo.ps',sep='')
+ fileout<-paste(outputdir,i,'_',categ[jc],'_',file[[i]],'_',variable,'_',s1,'_',s3,'_',rgn[[ms]],'.ps',sep='')
+ cairo_ps(fileout,width=8,height=7)
+ title<-paste('Reliability Diagram. (',var_name,', ',categ[jc],')
+1 month lead, ',targ[[is]],' (',s1,'-',s3,'), ',region[[ms]],'
+',rinp[[js]],', ',Stitle[[i]],sep='')
+
+ rd<-list()
+ for(j in 1:nthr){ # j = loop over the 4 models + MME; jc = 3 categories
+ rd[[j]]<-ReliabilityDiagramHist(model[j,],obs[[i]][jc,],plot=F,nboot=500)
+ }
+
+# consbars=T draws the consistency bars, consbars=F omits them
+# PlotRD(rd,nbins=10,consbars=T,colLine=col_line,colBar=col_bar,tit=title,hist_ylim=c(0,100))
+ PlotRD(rd,nbins=10,consbars=T,colLine=col_line,colBar=col_bar,tit=title,Lg=Model_Name)
+ dev.off()
+
+ } # 3 categories (jc=1 ->'below', jc=2 ->'near', jc=3 ->'above')
+#######################################################
+
+ } # 1 raw data and 4 post-processing datasets
+#######################################################
+
+ } # 10 Regions
+
+ } # 4 seasons
+
+} # 2 Reanalysis
diff --git a/importar ,,,v190.r b/importar ,,,v190.r
new file mode 100644
index 0000000000000000000000000000000000000000..1ffc1bcd14dbd50fb373840076b7c4852bb927bb
--- /dev/null
+++ b/importar ,,,v190.r
@@ -0,0 +1,5888 @@
+#if(require(gdata)==FALSE)install.packages("gdata",contriburl = contrib.url("http://cran.us.r-project.org"))
+
+##################################################### VARIABLES ###################################################################################
+# FORMAT    note: before importing the infofile, delete all the empty columns after the last used one; sometimes columns of NA get imported as well
+# INFOFILE: the column with the station codes in the Excel infofile must be formatted as TEXT, otherwise the leading zeros are lost.
+# the list in the infofile MUST have a first header row, and the columns arranged in this order:
+# code / station name / province / lat / lon / elevation
+# (the lat and lon columns MUST be expressed in decimal degrees, while the other columns can have any name, and province and elevation may also be empty)
+# name and folder of the .xls infofile with the list of series:
+#dir_infofile="C:/nicola/precipitaciones/weather_types/Validation series 1864-2005/database_Madrid_Lisboa_Valencia.xls"
+#dir_infofile="C:/nicola/precipitaciones/weather_types/datos_caudales_portugal/infofile.xls"
+#dir_infofile="C:/nicola/precipitaciones/prec_mensuales_Iberia/infofile_Iberia_sin_Ceuta_y_con_Lisboa.xls" # this database is loaded instead of "infofile_Iberia_sin_-999.xls", even though a few stations have -999 in 1945, 1946 and 1947, because the common intersection period with the Cost733 WT database is 1957-2002 anyway
+#dir_infofile="C:/nicola/precipitaciones/weather_types/contaminantes/infofile_ingresos_hospital.xls"
+#dir_infofile="C:/nicola/precipitaciones/prec_mensuales_Spain/Mopredas_1915-2003/6) info_mopredas_1915-2005_full_para_R.xls"
+dir_infofile="C:/nicola/temperaturas/WTs and temp/infofile.xls"
+
+# FORMAT    In the .txt or .csv data file with the monthly precipitation there can also be months set to -999: in this case the regression can still be done,
+# DATABASE  the error indices computed (removing the missing years) and the interpolation performed; only to build the variogram must the -999 values be
+# PRECIP:   removed and replaced with the monthly mean of the series over all years.
+# Each series may also start and/or end in a year different from the others; what matters is that there are no missing years inside a series!!!
+# (i.e. there must be one row for every year of data, even if it is filled only with -999)
+# The files with the weather types (WTs) of each classification can also start and end on dates different from the precipitation database,
+# but remember that to do the regression you must choose a common period always included in all the classifications.
+# the file may or may not have a first header row with arbitrary names, but the columns MUST follow this order:
+# code year Jan Feb Mar ... Dec
+# precipitation values can be expressed in mm or in tenths of mm.
+# note that the series in the infofile can be in an order different from the data file; it is only required that no series is repeated two or more times, neither in the infofile nor in the data.
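+# (editor's sketch, not part of the original script) a minimal example of how a data file in
+# the format described above could be read, using dir_datos, header_datos and dec_mm defined
+# just below:
+#
+#   datos <- read.table(dir_datos, header=header_datos)  # code / year / Jan..Dec
+#   names(datos) <- c("code","year", month.abb)          # month.abb = "Jan".."Dec"
+#   datos[datos == -999] <- NA                           # -999 marks missing monthly values
+#   if(dec_mm) datos[,3:14] <- datos[,3:14]/10           # tenths of mm -> mm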
+# name and folder of the .txt file with the database of monthly precipitation series:
+#dir_datos="C:/nicola/precipitaciones/weather_types/Validation series 1864-2005/database_Madrid_Lisboa_Valencia.txt" # there are some -999, so it cannot be interpolated
+#dir_datos="C:/nicola/precipitaciones/weather_types/datos_caudales_portugal/datos_1956-2006.txt"
+#dir_datos="C:/nicola/precipitaciones/prec_mensuales_Iberia/datos_Iberia_sin_Ceuta_y_con_Lisboa.txt"
+#dir_datos="C:/nicola/precipitaciones/weather_types/contaminantes/2) datos_ingresos_hospital.txt"
+#dir_datos="C:/nicola/precipitaciones/prec_mensuales_Spain/Mopredas_1915-2003/base_datos_origen_1850-2005.csv"
+dir_datos="C:/nicola/temperaturas/WTs and temp/datos.txt"
+
+header_datos=TRUE # whether the data file has a first header row (TRUE for the MOPREDASP database)
+piu_1=FALSE # TRUE if 1 was added to all monthly precipitation values in the data file to avoid zeros
+dec_mm=TRUE # TRUE if precipitation is expressed in tenths of mm, FALSE otherwise
+remove_2006=FALSE # TRUE to remove the year 2006 from the database (only for the MOPREDASP database)
+
+##### Weather Types (WTs) parameters:
+# folder with all the files of the weather type classifications used: (in the format year/month/day/WT)
+dir_WT="C:/nicola/precipitaciones/weather_types/26 WTs from begin year to end"
+cla.rif=3 # number of the WT classification taken as reference for the regression and the other analyses
+          #(1= 20th C.Reanalysis, 2= EMULATE, 3=NCEP, 4=NCEP with 10 WTs, 5= EMULATE with the downscaling done by me with corrected S and E; the order is the same as the data files in the dir_WT folder)
+          # beware: if the classification does not begin on the 1st of January or does not end on the 31st of December, the first and/or last year of data is removed
+weekly=FALSE # FALSE if the WT counts in monthlyWT are done by month, TRUE by week
+years.filtro=30 #50 # number of years for the Gaussian-filter smoothing in Map 6 of the WT frequencies
+contaminantes=FALSE # TRUE if, INSTEAD of the WTs, the file with the 5 pollutants indicated below is loaded:
+contam="C:/nicola/precipitaciones/weather_types/contaminantes/5_contaminantes.txt"
+
+# in the last data column, the one with the WTs, they must be numbered so that each increasing integer corresponds to the following WT:
+# "NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N"
+# the numbers need not run exactly from 1 to 26; it is enough that the smallest number corresponds to type NE, the second smallest to E, etc.
+begin.analisi=1950 #1915 #1950 #1948 #1 #1850 # starting year for the computation of the monthly WT frequencies (it can differ from the regression period, as long as WT data are available)
+end.analisi=2010 #1949 #2003 #574 # ending year for the computation of the WT frequencies
+
+##### Parameters for the Weather Type classification with the Lamb method:
+SLP_to_WTs=FALSE # if TRUE, before importing the weather type files, it imports ONE file with SLP pressure data in the following format:
+                 # year/month/day/daily pressure of the pixels along longitude [1 day repeated for each number of pixels along latitude]
+                 # from the directory indicated below, and converts it into a weather type file, saved under 'dir_WT/SLP_to_WTs.txt'.
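+# (editor's sketch, not part of the original script) one way the "standard" layout described
+# above could be read into a [lon, lat, day] array, assuming each day occupies npx.ver
+# consecutive rows (file.SLP and npx.ver are defined just below):
+#
+#   slp <- read.table(file.SLP)   # year/month/day + SLP of the pixels along longitude
+#   n.days <- nrow(slp)/npx.ver
+#   press <- array(t(as.matrix(slp[,-(1:3)])), dim=c(ncol(slp)-3, npx.ver, n.days))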
+
+# name of the input file with the SLP data in the correct format (called normalised or standard in the text):
+#file.SLP="C:/nicola/pressure/database_emulate/2) emulate_normalizado_1850-2003.txt"
+file.SLP="C:/nicola/pressure/database_NCEP_Reanalysis/3) database per R/NCEP_formato_standard_1970-2012.txt" # beware, it is more than one GB of data!!!
+#file.SLP="C:/nicola/pressure/database_NCEP_Reanalysis/3) database per R/NCEP_formato_standard_2000.txt"
+#file.SLP="C:/nicola/pressure/database_NCEP_Reanalysis/3) database per R/NCEP_formato_standard_from_2000_to_october_2012.txt" # only from 2000 to 2012
+
+npx.ver=73 #10 for EMULATE, 73 for NCAR/NCEP # number of vertical pixels [latitude] of the grid in file.SLP (the number of horizontal pixels is not needed)
+          #(it coincides with the number of rows of each raster map, i.e. the possible latitude values)
+deg=2.5 #5 for EMULATE, 2.5 for NCAR # resolution of the SLP grid in degrees
+lat.central=40 # latitude in degrees of the 4 pixels of the central row (for the constants in the formulas of the 6 geostrophic indices).
+               # 40 for Madrid, 42.5 for Santander and Zaragoza, 45 for Torino
+               # note that if the grid is shifted longitudinally, the 6 geostrophic indices do not change.
+lat.2nd.row=lat.central+5 #45 #47.5 # 45 # latitude in degrees of the 4 pixels of the row above the central one (usually 5 degrees further north)
+angolo.basso.sinistra=c(-70, 25) # coordinates of the bottom-left corner of each grid (only to display the final maps)
+
+# position of the 16 pixels on which the WT computation is based, inside the grid in file.SLP
+# (always counting from top to bottom and from left to right):
+#lon.16p=c(13,15,11,13,15,17,11,13,15,17,11,13,15,17,13,15) # for the EMULATE database centred on the IP
+#lat.16p=c(5,5,6,6,6,6,7,7,7,7,8,8,8,8,9,9) # for the EMULATE database centred on the IP
+lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1) # for the NCAR database centred on the IP (at -5°,40°)
+lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25) # for the NCAR database centred on the IP
+#lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1) # for the NCAR database centred on Santander (+1 pixel northward)
+#lat.16p=c(16,16,18,18,18,18,20,20,20,20,22,22,22,22,24,24) # for the NCAR database centred on Santander
+#lon.16p=c(143,3,139,143,3,7,139,143,3,7,139,143,3,7,143,3) # for the NCAR database centred on Zaragoza (+2 pixels eastward and +1 northward)
+#lat.16p=c(16,16,18,18,18,18,20,20,20,20,22,22,22,22,24,24) # for the NCAR database centred on Zaragoza
+
+#lon.16p=c(2,6,142,2,6,10,142,2,6,10,142,2,6,10,2,6) # for the NCAR database centred on Torino
+#lat.16p=c(15,15,17,17,17,17,19,19,19,19,21,21,21,21,23,23) # for the NCAR database centred on Torino
+
+# name of the output file where the 26 WTs are saved in numeric format from 1 to 26 (year/month/day/WT):
+output.WTs="C:/nicola/precipitaciones/weather_types/26 WTs from begin year to end/NCEP.txt"
+#output.WTs="C:/nicola/precipitaciones/weather_types/26 WTs from begin year to end/WTs_from_EMULATE_nic.txt"
+
+# for example, the NCEP grid is 144*73 pixels at 2.5 degree resolution, starting from 90°N 0°E going east and south.
+# the lon and lat of the 16 grid points on which the WT computation for the IP is based are always:
+#
+# p1: (-10°,50°)                p2: (0°,50°)
+# p3: (-20°,45°) p4: (-10°,45°) p5: (0°,45°)  p6: (10°,45°)
+# p7: (-20°,40°) p8: (-10°,40°) p9: (0°,40°)  p10:(10°,40°)
+# p11:(-20°,35°) p12:(-10°,35°) p13:(0°,35°)  p14:(10°,35°)
+# p15:(-10°,30°)                p16:(0°,30°)
+#
+# therefore, to convert these coordinates into grid pixels we must know how the grid is built: how many pixels, at what resolution,
+# where the counting starts and in which direction it proceeds. For the NCEP database the pixels are 144 horizontally (lon) and 73 vertically (lat),
+# the resolution is 2.5°, and counting starts from 90°N 0°E going east and then south when a horizontal row is finished.
+# Studying the pressure values at the two poles one sees that the coordinates of each pixel correspond to the centre of the pixel.
+# Based on these coordinates, the position of the 16 points inside the 144x73 NCEP grid is (draw the grid on squared paper):
+#
+# p1: (141,17)              p2: (1,17)
+# p3: (137,19) p4: (141,19) p5: (1,19)  p6: (5,19)
+# p7: (137,21) p8: (141,21) p9: (1,21)  p10:(5,21)
+# p11:(137,23) p12:(141,23) p13:(1,23)  p14:(5,23)
+# p15:(141,25)              p16:(1,25)
+#
+# so the position of the 16 pixels of the NCEP database can be summarised as:
+# lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1)
+# lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)
+# npx.ver=73
+# the same reasoning can be applied to the Emulate database, made of a 25 x 10 grid at 5° resolution, whose top-left pixel is
+# at -70°E, 70°N (in this case the metadata specify that the Emulate pixel coordinates indicate precisely the centroid of the pixel):
+# the lon and lat of the 16 grid points for the WT computation are therefore:
+#
+# p1: (13,5)            p2: (15,5)
+# p3: (11,6) p4: (13,6) p5: (15,6) p6: (17,6)
+# p7: (11,7) p8: (13,7) p9: (15,7) p10:(17,7)
+# p11:(11,8) p12:(13,8) p13:(15,8) p14:(17,8)
+# p15:(13,9)            p16:(15,9)
+#
+# so the position of the 16 pixels of the Emulate database can be summarised as:
+# lon.16p=c(13,15,11,13,15,17,11,13,15,17,11,13,15,17,13,15)
+# lat.16p=c(5,5,6,6,6,6,7,7,7,7,8,8,8,8,9,9)
+# npx.ver=10
+
+NCEP_to_SLP=FALSE # if TRUE, before importing the file with the SLP pressure data, it imports all the files of the NCEP pressure database and
+                  # converts them into the correct format, so that they can later be converted into weather types if desired.
+                  # note that the NCEP pressure files all have a single column with all the grid values stacked vertically, plus a row reading 144*73
+                  # every time a grid of 144*73 rows ends, and there are no other columns indicating the year and/or month and/or day and/or hour.
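+# (editor's sketch, hypothetical helper, not part of the original script) the pixel positions
+# listed above follow directly from the grid geometry: for a grid at resolution res whose
+# first pixel is centred at 0°E, 90°N, counting eastward and then southward,
+#
+#   lonlat2pixel <- function(lon, lat, res=2.5){
+#     lon <- lon %% 360                               # e.g. -10 -> 350
+#     c(col = lon/res + 1, row = (90 - lat)/res + 1)  # 1-based grid indices
+#   }
+#   # lonlat2pixel(-10, 50) gives c(col=141, row=17), i.e. p1 of the NCEP table above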
+
+# the following options need to be specified only if NCEP_to_SLP=TRUE:
+dir_NCEP="C:/nicola/pressure/database_NCEP_Reanalysis/2) database en formado txt" # directory holding all and only the data files in .txt format (one file per month, with a 7-character file name of the form Year_Month)
+file_NCEP_standard="C:/nicola/pressure/database_NCEP_Reanalysis/3) database per R/NCEP_formato_standard.txt" # directory and name of the file where the standardised data in .txt format will be written (one file per month)
+num.grid.for.day=4 # number of pressure grids available per day (1 if the data are daily, 4 if the data are taken every 6 hours) [unfortunately for now it must be left equal to 4, because a small change to the line computing the daily mean is still missing]
+NCEP_ori_npx=144 # number of horizontal pixels of the NCEP SLP grid (assumed to run from 0 to 360 degrees going east)
+NCEP_ver_npx=73 # number of vertical pixels of the NCEP SLP grid (assumed to run from 90 degrees north to -90 degrees going south)
+
+##### Generic parameters:
+int.mesi=c(12,1,2,3,4,5,6,7,8,9,10,11) #c(9,10,11,12,1,2,3,4,5,6,7,8) #c(1) # months selected for the optional interpolation, the regression and the other analyses
+graph.yx<-c(3,4) # number of rows and columns of plots into which the screen is split when showing the monthly plots selected in int.mesi
+altro=FALSE # if TRUE, also run secondary analyses such as the maps for the posters, etc.
+#dir_root="C:/nicola/precipitaciones/weather_types" # folder where the outputs are saved (.Rdata, maps, tables, etc.):
+#dir_root_kriging="C:/nicola/precipitaciones/kriging_mensuales_Iberia/grids for each year" # folder where the kriging outputs are saved:
+dir_root="C:/nicola/temperaturas/WTs and temp/output" # folder where the outputs are saved (.Rdata, maps, tables, etc.):
+dir_root_kriging="C:/nicola/temperturas/WTs and temp/kriging_mensuales_Iberia/grids for each year" # folder where the kriging outputs are saved:
+
+
+##### Interpolation parameters:
+interpol="NO" # "NO" if you do NOT want to interpolate before doing the WT regression,
+              # "SI" to interpolate EVERY YEAR with the method chosen below, after which the WT regression model is applied to the monthly precipitation time series of every grid pixel.
+              # "CLI" to interpolate the WHOLE period [start.int-end.int] with the method chosen below to create the climatology
+              # (to be implemented) "ANOM" to first interpolate the climatology with the method chosen below and then multiply it by the yearly interpolations of the anomalies with a Simple Kriging
+proyected=TRUE # TRUE if the grid and the DEM are projected, so the distance between pixels is measured in km (as in the WT analysis)
+               # FALSE if the grid and the DEM use the geographic lat/lon coordinate system expressed in decimal degrees (as for William's models)
+loocv=FALSE # TRUE if, instead of interpolating to build the grid, only the stations are interpolated with leave-one-out cross-validation (loocv)
+start.int=1945 #1971 #1950 #1961 #1948 # starting year of the interpolation (it is the same one also used for computing the correlations between series).
+end.int=2005 #2000 #2003 #2000 #2003 # final year of the interpolation (and for computing the correlations between series)
+# properties of the grid type used for interpolation:
+# for example, Michele's Mopredas grid has a resolution of 0.25 decimal degrees, the top-left corner has coordinates (-10.125,44.125) and the number of pixels is 55(horiz)*34(vert).
+# the grid of William's Ensemble models has the same resolution and is defined only for the land pixels in a smaller box; the intersection of the two grids lets us work on a narrower grid:
+# the final grid has a resolution of 0.25 decimal degrees, the centre of the pixel in the top-left corner has coordinates (-9.125,44.125) and the centre of the bottom-right pixel is (3.125,35.875) (even though the first two rows and the last one are strips of sea), and the number of pixels is 50(horiz) * 34(vert).
+# To build the grid, the GridTopology function is used, in which you must specify the value of cellcentre.offset, which corresponds to the MINIMUM values of the coordinates of the CENTRE POINTS of the grid pixels.
+# (of course the absolute minimum values can be obtained by subtracting half a pixel length from the minima of the centres, but R already computes this automatically)
+coord.system.geograf=CRS("+proj=longlat +datum=WGS84") # geographic coordinate system used when proyected is FALSE
+coord.system.proyected=CRS("+proj=utm +zone=30 +datum=WGS84") # projected coordinate system used when proyected is TRUE
+pixel.size.geograf=0.20 #0.25 # side length of a pixel (i.e. grid resolution) in decimal degrees if the coordinate system is the geographic lat/lon one; e.g. 0.25 decimal degrees to compare with William's models,
+pixel.size.proyected=10 # projected grid resolution in km; e.g. 10 km for the WT analysis.
+xmin.box.geograf=-9.6 #-9.125 # minimum longitude of the pixels of the box (working rectangle), measured at the CENTRE of the pixels
+ymin.box.geograf=36 #35.875 # minimum latitude of the pixels of the box (working rectangle), measured at the CENTRE of the pixels
+xmin.box.proyected=-70000
+ymin.box.proyected=3980000
+num.x.pixels.geograf=70 #50 # equivalent to: (3.125+0.125)-(-9.125-0.125)=12.5 (total box length in the x direction in decimal degrees), which divided by the 0.25 pixel length gives exactly 50
+num.y.pixels.geograf=40 #34 # i.e. equivalent to: (xmax - xmin + pixel.size) / pixel.size
+num.x.pixels.proyected=ceiling(1110/pixel.size.proyected) # if you do not know how many pixels there are in one dimension, instead of an exact number you can compute the box length and divide by the pixel length, rounding up.
+num.y.pixels.proyected=ceiling(890/pixel.size.proyected)
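+# (editor's sketch, not part of the original script) building the geographic version of the
+# interpolation grid described above with sp's GridTopology, from the parameters just defined
+# (assumes library(sp) is loaded):
+#
+#   grid.topo <- GridTopology(cellcentre.offset=c(xmin.box.geograf, ymin.box.geograf),
+#                             cellsize=rep(pixel.size.geograf, 2),
+#                             cells.dim=c(num.x.pixels.geograf, num.y.pixels.geograf))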
+
+# Layers with the outline of Spain (without islands) in the two versions:
+SpainPoly.geograf='C:/Projecto_Master/Limites Espana/borde_españa_sin_proyeccion.shp'
+SpainPoly.proyected='C:/Projecto_Master/Limites Espana/borde_españa_proyected.shp'
+# Layer with the outline of the Iberian Peninsula and the border between Spain and Portugal: (only the version in geographic coordinates is needed; R computes the projected one)
+IpPoly.geograf='C:/Projecto_Master/Limites Espana/borde_peniberica_sin_islas_sin_proyeccion.shp'
+# Layer with the outline of the Iberian Peninsula, the Spain-Portugal border and the Balearic Islands: (only the version in geographic coordinates is needed; R computes the projected one)
+IpIslesPoly.geograf='C:/Projecto_Master/Limites Espana/borde_peniberica_sin_africa_y_francia_sin_proyeccion.shp'
+
+# Digital Elevation Model: (the variable name depends on the chosen coordinate system, geographic or projected)
+mde.geograf="C:/nicola/precipitaciones/Local_Regression_Kriging/aster_gdem_iberian_peninsule_res_0.25.txt" # file name and directory of the ascii DEM of the working area (e.g. the IP) for the local regression
+mde.proyected="C:/nicola/precipitaciones/correlaciones_para_Martin/datos_decadas/mde_10km_sin_africa.txt" # here you can also find a 1 km DEM if you need it: "C:/nicola/precipitaciones/correlaciones_para_Martin/datos_decadas/mde_1km_sin_africa.txt" # file name and directory of the type-1 ascii DEM
+# IMPORTANT: the working rectangle, the resolution and the geographic coordinate system of the DEM must be the same as those of the interpolation grid!!!
+# use ArcMap's Clip management tool to clip the Aster Gdem (24*31 m resolution, unprojected) and then the Resample tool with the bilinear method to go from 0.000277777 degrees of resolution to 0.0025 (x9), then to 0.025 (x90) and finally to 0.25 (x900). Unfortunately you cannot multiply directly by 900, so intermediate rasters must be created.
+# the file must finally be converted to ASCII format; with ArcMap you can convert it using the RasterToASCII tool; but then you must change any ',' in the xllcorner, yllcorner and size values to '.', because R only recognises the dot!
+# moreover, if you want to interpolate only the land points, it is enough to set NODATA_value equal to the value of the sea pixels (e.g. 0) in the DEM text file.
+# If you need to modify some pixels of the DEM, do it in Excel 2007, because it lets you save to txt with sep=" "
+# (unfortunately we cannot yet work with high-resolution DEMs, so as to then create in R a low-resolution DEM of our choice by bilinear interpolation, because the converted ascii file is more than 6 GB)
+dmax=300 # maximum distance (in km) between pairs of stations to build the variogram (also for the one in decimal degrees, because they are automatically converted to km inside the variogram function)
+# choose the interpolation type:
+int.method="OK" #"LRK"# OK: Ordinary Kriging; RK: Regression-Kriging ; LRK: Local Regression-Kriging (the correlations between series are needed first)
+int.nmax=50 # maximum number of neighbours used in any type of Kriging (only to speed up the computation of the final formula with the weighted contributions of each station; the variogram is built with all the stations)
+
+##### PCA parameters:
+PCRA=FALSE # TRUE if, before the stepwise regression, a PCA of the WTs used by the regression is done and they are replaced with the EOFs (remember to also set negative=TRUE)
+stand=TRUE # TRUE if the WTs are standardised before the PCA (strangely, it produces the same results whether TRUE or FALSE, as if standardisation happened anyway inside the princomp algorithm)
+correl=FALSE # if TRUE, use the correlation matrix instead of the covariance matrix to compute the PCA (but beware that the WTs without events must be removed, because they are constant predictors)
+rotation=FALSE # if TRUE, after the PCA a varimax rotation of the loadings is performed
+
+##### Parameters of the regression of WTs vs monthly precipitation:
+n.weather.types=10 # can be 26 or 10. If 10, it converts internally the set of 26 weather types imported into a set of 10 weather types to use in the regression.
+start.regr<-1950 #1871 #1948 #1 # starting year of the calibration period for the stepwise regression
+                 #(it usually coincides with the years of monthly precipitation available for the whole Iberian Peninsula).
+end.regr<-2010 #2005 #574 # final year of the calibration period for the stepwise regression
+predictand="TEMP" # can be "PREC" or "TEMP"
+intercept=TRUE # TRUE if the intercept (i.e. the constant term) is also considered in the regression, FALSE otherwise
+negative=TRUE # FALSE if the values of the coefficients and of the optional intercept can never be negative (the lm function is used for the regression, otherwise nnlm is used) (FALSE should be used when the WTs are used instead of the PCs)
+backward=FALSE # TRUE if the stepwise regression is also computed with the backward method, FALSE for the forward method only
+max.autocorrelation=1 #0.4 # exclude all predictor combinations containing a pair of predictors with correlation above this threshold (set it above 1 to deactivate it)
+min.cv=FALSE # if TRUE, the stepwise regression minimises the CV (coefficient of variation = RMSE / mean observed precipitation) instead of the adjusted R2 (used for precipitation)
+min.rmse=TRUE # if TRUE, the stepwise regression minimises the RMSE instead of the adjusted R2 or the CV (normally used for temperature)
+step.min.r2=0 # the stepwise regression stops if the R2 does not improve by at least step.min.r2 (not used if the regression is not stepwise)
+step.min.CV=0.01 # the stepwise regression stops if the CV (computed over the same period as the regression) does not improve by at least step.min.CV # e.g. a step.min.CV of 0.05 corresponds to 5 mm of rain for a station with a monthly mean of 100 mm
+step.min.rmse=0.01 # the stepwise regression stops if the RMSE (computed over the same period as the regression) does not improve by at least step.min.rmse
+stepwise=TRUE # if FALSE, the stepwise regression does not stop when the adjusted R2, the CV or the RMSE stop improving, but only when it has found the first predictors (even while the adjusted R2 or the CV keep improving)
+num.max.pred=10 # needed even if stepwise=TRUE, to define the maximum number of predictors that not even the forward/backward method can exceed (it must be lower than max.nWT) [it mainly serves to define smaller matrices that use less memory]
+jacknife=TRUE # if TRUE, when reconstructing the precipitation and computing the error indices (CV, COR, MBE, etc.), the predicted precipitation for each year of the
+# regression period is computed with a leave-one-out cross-validation, i.e. for each year the reconstructed precipitation is based on the coefficients of a regression computed
+# only on the OTHER years of the regression period; however, the type and number of the WT predictors used by the regression to reconstruct
+# the precipitation (and the order in which they are introduced) are chosen by a regression that always minimises the r2 or the CV computed WITHOUT the jacknife,
+# i.e. the coefficients computed by the stepwise model without leave-one-out cross-validation are used.
+jacknife.full=FALSE # if TRUE, the jacknife is applied not only to reconstruct the precipitation and compute the error indices (i.e. to validate the model),
+                    # but also to compute the r2 or the CV needed to select the type, order and number of the WT or PC predictors used by the regression.
+only.best=TRUE # if TRUE, during model validation CV, COR, MBE etc. are not also computed for the intermediate stepwise regressions before reaching the best predictor set found (this makes the jackknife about 10 times faster).
+seasons=FALSE # TRUE for the regression at the seasonal level, FALSE at the monthly level.
+
+##### Parameters for the reconstruction & validation of monthly precipitation:
+start.recons=1950 #1871 #1 #1948 #1850 #1920 # first year of the reconstruction (the reconstruction period can also include part or all of the calibration period)
+end.recons=2010 #2005 #574 #2003 # last year of the reconstruction (the reconstruction period can also include part or all of the calibration period)
+start.valid= 1950 #1945 #1948 #1 #1864 #1956 for the streamflows # first year of the validation period used to measure the quality of the reconstruction
+# (CV, corr, bias, etc.); beware that it MUST fall inside the reconstruction period and you CANNOT place it in periods where the series have no
+# observed precipitation data at all
+end.valid=2010 #2005 #574 #2003 for the streamflows # last year of the validation period used to measure the quality of the reconstruction (it must fall inside the reconstruction period)
+
+# name and path of the perl.exe file used to import the Excel data:
+perlexe="C:/Toshiba/Strawberry/perl/bin/perl.exe"
+
+# name and path of the python.exe file used to export the Excel data:
+pythonexe="C:/Python27/python.exe"
+
+correl<-FALSE # TRUE to compute the monthly correlations between all possible pairs of series
+regr.prec.vs.alt<-FALSE # TRUE to run the regression of precipitation vs elevation
+
+############################# GRAPHIC SETTINGS #################################################################################
+nome.mese<-c("January","February","March","April","May","June","July","August","September","October","November","December")
+nome.mese.esp=c("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
+nome.mese.short<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+n.days.month<-c(31,28.25,31,30,31,30,31,31,30,31,30,31) # mean number of days per month
+n.mesi=12
+
+if(seasons==TRUE){
+  nome.mese<-c("Autumn","Winter","Spring","Summer")
+  nome.mese.esp=c("Otoño","Invierno","Primavera","Verano")
+  nome.mese.short<-c("Aut","Win","Spr","Sum")
+  n.days.month<-c(91,90.25,92,92) # mean number of days per season
+  n.mesi=4
+}
+
+month.color=c("orange","darkcyan","red","darkred","darkgreen","magenta4","orange","darkcyan","red","darkred","darkgreen","magenta4") # colors to tell the months of the year apart
+month.color2=c("orange","darkcyan","red","black","darkgreen","magenta4","orange","darkcyan","red","darkred","black","magenta4") # colors to tell the months of the year apart
+col.metodo<-c("orange","darkcyan","blue","red","darkred","darkgreen","green")
+
+jet.colors <-colorRampPalette(c("violet","blue", "#007FFF", "cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
+jet.colors2 <-colorRampPalette(c("violet","blue","cyan","#7FFF7F", "yellow", "#FF7F00", "red"))
+jet.colors3 <-colorRampPalette(c("blue","cyan","yellow", "#FF7F00", "red"))
+jet.colors4 <-colorRampPalette(c("darkmagenta","blue","yellow","red"))
+jet.colors5 <-colorRampPalette(c("blue","cyan","yellow","red"))
+jet.colors6 <-colorRampPalette(c("transparent","gold2","red"))
+jet.colors7 <-colorRampPalette(c("transparent","gold2","red","black"))
+jet.colors8 <-c("red","yellow","cyan","blue")
+jet.colors9 <-c("black")
+jet.colors10 <-c("red","green2","blue")
+jet.colors11<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
"#7F0000")) +jet.colors12 <-colorRampPalette(c("violet","blue","cyan","#7FFF7F", "yellow", "#FF7F00")) # same as jet.colors3 per la climatologia del database Ip02 +jet.colors13 <-colorRampPalette(c("blue","cyan","white","red","darkred")) +jet.colors14 <-colorRampPalette(c("blue","cyan","gray90","red","darkred")) +jet.colors15 <-colorRampPalette(c("magenta","blue","cyan","gray90","orange","red","darkred")) + +jet.colors.pred<-list() + +if(PCRA==FALSE){ +jet.colors.pred[[1]]<-c("grey78","grey78","grey78","violetred1","red","blue","purple4","cyan","yellow2","aquamarine","gray10","green1","green4","orange","darksalmon","brown4","peachpuff4","gray78","skyblue4","gray78","gray78","gray78","gray78","gray78","seagreen4","red4") +} else {jet.colors.pred[[1]]<-c("violetred1","red","blue","purple4","cyan","yellow2","aquamarine","green1","green4","orange","darksalmon","brown4","peachpuff4","skyblue4","seagreen4","red4","gray78","gray78","gray78","gray78","gray78","gray78","gray78","gray78","gray78","gray78")} +jet.colors.pred[[3]]<-jet.colors.pred[[2]]<-jet.colors.pred[[1]] +jet.colors.pred[[4]]<-jet.colors.pred[[3]] #c("violet","blue","cyan","#7FFF7F","yellow", "orange", "red","brown","black" ,"gray") + + +WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +# tra l'altro questo elenco ha lo stesso ordine dell'elenco dei WTs del database Emulate, che pero' invece di essere enumerati da 1 a 26 assumono questi valori: +# 1,2,3,4,5,6,7,8,30,31.5,32.5,33.5,34.5,35.5,36.5,37.5,38.5,40,41.5,42.5,43.5,44.5,45.5,46.5,47.5,48.5 +if(n.weather.types==10)WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.names<-list() + +if(PCRA==FALSE){ + WTs.names[[1]]<-WTs.type # per compatibilita' con le vecchie versioni + if(n.weather.types==26){ + WTs.names.long<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Cyc.Northeasterly (C.NE)","Cyc.Easterly (C.E)","Cyc.Southeasterly (C.SE)","Cyc.Southerly (C.S)", + "Cyc.Southwesterly (C.SW)","Cyc.Westerly (C.W)","Cyc.Northwesterly (C.NW)","Cyc.Northerly (C.N)","Anticyclonic (A)", + "Ant.Northeasterly (A.NE)","Ant.Easterly (A.E)","Ant.Southeasterly (A.SE)","Ant.Southerly (A.S)","Ant.Southwesterly (A.SW)", + "Ant.Westerly (A.W)","Ant.Northwesterly (A.NW)","Ant.Northerly (A.N)") + WTs.names.long2<-c("Northeasterly","Easterly","Southeasterly","Southerly","Southwesterly","Westerly","Northwesterly", + "Northerly","Cyclonic","Cyc.Northeasterly","Cyc.Easterly","Cyc.Southeasterly","Cyc.Southerly", + "Cyc.Southwesterly","Cyc.Westerly","Cyc.Northwesterly","Cyc.Northerly","Anticyclonic", + "Ant.Northeasterly","Ant.Easterly","Ant.Southeasterly","Ant.Southerly","Ant.Southwesterly", + "Ant.Westerly","Ant.Northwesterly","Ant.Northerly") + } else { # in this case we are working with 10 weather types only + WTs.names.long<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") + WTs.names.long2<-c("Northeasterly","Easterly","Southeasterly","Southerly","Southwesterly","Westerly","Northwesterly", + "Northerly","Cyclonic","Anticyclonic") + } + WTs.names.full<-c(WTs.names[[1]],"I") + WTs.names[[3]]<-WTs.names[[2]]<-WTs.names[[1]] + WTs.names[[4]]<-WTs.names[[3]] #c("A","AE","AS","AW","AN","C","CE","CS","CW","CN") + + 
+  WTs.short.names1<-c("NW","N","NE","W","C","E","SW","S","SE")
+  WTs.short.names2<-c("C.NW","C.N","C.NE","C.W","C","C.E","C.SW","C.S","C.SE")
+  WTs.short.names3<-c("A.NW","A.N","A.NE","A.W","A","A.E","A.SW","A.S","A.SE")
+  WTs.short.names4<-c("NW","N","NE","W","I","E","SW","S","SE")
+
+  WTs.full.names1<-c("Northwesterly","Northerly","Northeasterly","Westerly","Cyclonic","Easterly","Southwesterly","Southerly","Southeasterly")
+  WTs.full.names2<-c("Cyc.Northwesterly","Cyc.Northerly","Cyc.Northeasterly","Cyc.Westerly","Pure Cyclonic","Cyc.Easterly","Cyc.Southwesterly","Cyc.Southerly","Cyc.Southeasterly")
+  WTs.full.names3<-c("Antic.Northwesterly","Antic.Northerly","Antic.Northeasterly","Antic.Westerly","Pure Anticyclonic","Antic.Easterly","Antic.Southwesterly","Antic.Southerly","Antic.Southeasterly")
+  WTs.full.names4<-c("Northwesterly","Northerly","Northeasterly","Westerly","Constant term","Easterly","Southwesterly","Southerly","Southeasterly")
+
+} else {WTs.names[[1]]<-paste("PC",1:26,sep="")}
+
+class.color=c("blue","red") # colors to tell the 20th Century Project classification apart from the EMULATE one
+############################ FUNCTIONS #############################################################################################################
+
+endmonth<-function(day,month,year){ # day is the day of the month to check: returns TRUE if it is the last day of the month, FALSE otherwise
+ last=FALSE
+ if(month==1 & day==31)last=TRUE;if(month==3 & day==31)last=TRUE;if(month==4 & day==30)last=TRUE;if(month==5 & day==31)last=TRUE
+ if(month==6 & day==30)last=TRUE;if(month==7 & day==31)last=TRUE;if(month==8 & day==31)last=TRUE;if(month==9 & day==30)last=TRUE
+ if(month==10 & day==31)last=TRUE;if(month==11 & day==30)last=TRUE;if(month==12 & day==31)last=TRUE
+ if(year%%400==0 | (year%%4==0 & year%%100!=0)){ # in this case the year is a leap year
+   if(month==2 & day==29)last=TRUE } else { # NON-leap year:
+   if(month==2 & day==28)last=TRUE }
+ return(last)
+}
+lastday<-function(month,year){ # similar to the previous one, returns the last day of the given month
+ if(month==1 | month==3 | month==5 | month==7 | month==8 | month==10 | month==12)last=31
+ if(month==4 | month==6 | month==9 | month==11)last=30
+ if(month==2){
+   if(year%%400==0 | (year%%4==0 & year%%100!=0)){last=29} else {last=28}
+ }
+ if(year==1 & month==2)last=28.25 # if the year is set to the number 1, it returns the mean number of days of February over many years (28.25). Useful to compute the WT frequencies of February.
+ return(last)
+}
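+# Quick sanity checks for the calendar helpers above (illustrative only, not executed):
+if(FALSE){
+  lastday(2,2000)     # 29: 2000 is a leap year (divisible by 400)
+  lastday(2,1900)     # 28: 1900 is not a leap year (divisible by 100 but not by 400)
+  lastday(2,1)        # 28.25: year 1 is the convention for the multi-year mean length of February
+  endmonth(30,4,2001) # TRUE: 30 April is the last day of the month
+}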
+
+RMSE<-function(obs,pred){ # simple function to compute the root mean square error given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+ # pred always contains all the yearly predicted values, while some years may lack observations, so only the rows of the years with NA among the obs values must be removed:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+ RMSE<-(sum.scarti.quad/(length(years.right)))^0.5 # the years with NA must be removed from the denominator!!!
+ return(RMSE)
+}
+
+MAE<-function(obs,pred){ # simple function to compute the Mean Absolute Error given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+ # pred always contains all the yearly predicted values, while some years may lack observations, so only the rows of the years with NA among the obs values must be removed:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ MAE<-sum(abs(obs2-pred2))/(length(years.right)) # the years with NA must be removed from the denominator!!!
+ return(MAE)
+}
+
+MAEp<-function(obs,pred){ # simple function to compute the Mean Absolute Error in percentage (%) given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+ # pred always contains all the yearly predicted values, while some years may lack observations, so only the rows of the years with NA among the obs values must be removed:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ MAE<-sum(abs(obs2-pred2))/(length(years.right)) # the years with NA must be removed from the denominator!!!
+ obs.prom<-sum(obs2)/length(years.right)
+ MAE<-MAE/obs.prom
+ return(MAE)
+}
+
+MBE<-function(obs,pred){ # simple function to compute the Mean Bias Error (the obs vector may contain NA elements)
+ # pred always contains all the yearly predicted values, while some years may lack observations, so the rows of the years with NA among the obs values must be removed:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ MBE<-sum(pred2-obs2)/(length(years.right))
+ return(MBE)
+}
+
+AGREE<-function(obs,pred){ # computes Willmott's d, or Index of Agreement; BEWARE that it is almost as insensitive to over/underestimation as the R2
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+ obs2.mean<-mean(obs2)
+ d<-1-(sum.scarti.quad/(sum((abs(pred2-obs2.mean)+abs(obs2-obs2.mean))^2)))
+ return(d)
+}
+
+AGREE.1<-function(obs,pred){ # Willmott's d corrected without the squares
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+ obs2.mean<-mean(obs2)
+ d1<-1-(sum.scarti/(sum(abs(pred2-obs2.mean)+abs(obs2-obs2.mean))))
+ return(d1)
+}
+
+AGREE.2011<-function(obs,pred){ # new Willmott d, introduced by him in 2011
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+ obs2.mean<-mean(obs2)
+ denom<-2*sum(abs(obs2-obs2.mean)) # denominator of the d(r) formula
+ if(sum.scarti<=denom){d.2011<-1-(sum.scarti/denom)}else{d.2011<-(denom/sum.scarti)-1}
+ return(d.2011)
+}
+
+RMSE.freedom<-function(obs,pred,degree.freedom){ # computes the root mean square error given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+ # pred always contains all the yearly predicted values, while some years may lack observations, so the rows of the years with NA among the obs values must be removed:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+ RMSE<-(sum.scarti.quad/degree.freedom)^0.5
+ return(RMSE)
+}
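+# Worked example for the error metrics above (illustrative only): with obs=c(10,NA,30)
+# and pred=c(12,99,27), the NA year is dropped, so RMSE=sqrt((4+9)/2)=2.55,
+# MAE=(2+3)/2=2.5 and MBE=(2-3)/2=-0.5.
+if(FALSE){
+  obs<-c(10,NA,30); pred<-c(12,99,27)
+  RMSE(obs,pred); MAE(obs,pred); MBE(obs,pred); AGREE(obs,pred)
+}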
+
+SumSquared<-function(obs,pred){ # simple function to compute the sum of squared deviations given the vector of observed values and the vector of predictions (the obs vector may contain NAs)
+ # pred always contains all the yearly predicted values, while some years may lack observations, so the rows of the years with NA among the obs values must be removed:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+ return(sum.scarti.quad)
+}
+
+
+# Function to run the regression (included because it is faster than lm(); it is a modified version of lm.fit() without too many outputs)
+# From R version 2.15 on it cannot be used any more; use lm.fit.fast2 instead.
+# Beware: with lm.fit you must also pass an initial column of ones to emulate the constant term!!!
+lm.fit.fast<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+    z <- .Fortran("dqrls", qr = x, n = n, p = p, y = y, ny = ny,
+        tol = as.double(tol), coefficients = mat.or.vec(p, ny),
+        residuals = y, effects = y, rank = integer(1L), pivot = 1L:p,
+        qraux = double(p), work = double(2 * p), PACKAGE = "base")
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
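+# Illustrative usage (never executed): unlike lm(), these helpers expect the design
+# matrix to already contain a column of ones when an intercept is wanted.
+if(FALSE){
+  X<-cbind(1,rnorm(100)); y<-2+3*X[,2]+rnorm(100)
+  lm.fit.fast(X,y)$coefficients   # should be close to c(2,3) (use lm.fit.fast2 on R >= 2.15)
+  coef(lm(y~X[,2]))               # the same fit through the standard interface
+}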
+
+# After R version 2.15 the .Fortran() entry points of base R are no longer allowed and must be replaced with .Call,
+# so lm.fit.fast2 must be called instead of lm.fit.fast:
+lm.fit.fast2<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+
+    z <- .Call(stats:::C_Cdqrls, x, y, tol, TRUE)
+
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
+
+north.arrow <- function(loc,size,bearing=0,cols,cex=1,...) {
+  # checking arguments
+  if(missing(loc)) stop("loc is missing")
+  if(missing(size)) stop("size is missing")
+  # default colors are white and black
+  if(missing(cols)) cols <- rep(c("white","black"),8)
+  # calculating coordinates of polygons
+  radii <- rep(size/c(1,4,2,4),4)
+  x <- radii[(0:15)+1]*cos((0:15)*pi/8+bearing)+loc[1]
+  y <- radii[(0:15)+1]*sin((0:15)*pi/8+bearing)+loc[2]
+  # drawing polygons
+  for (i in 1:15) {
+    x1 <- c(x[i],x[i+1],loc[1])
+    y1 <- c(y[i],y[i+1],loc[2])
+    polygon(x1,y1,col=cols[i])
+  }
+  # drawing the last polygon
+  polygon(c(x[16],x[1],loc[1]),c(y[16],y[1],loc[2]),col=cols[16])
+  # drawing letters
+  b <- c("E","N","W","S")
+  for (i in 0:3) text((size+par("cxy")[1])*cos(bearing+i*pi/2)+loc[1],
+                      (size+par("cxy")[2])*sin(bearing+i*pi/2)+loc[2],b[i+1],
+                      cex=cex)
+}
+
+n.days.in.a.year<-function(year)return(ifelse((year%%4==0 & year%%100!=0) | year%%400==0,366,365))
+
+seq.months.in.a.year<-function(year){ # returns a sequence of 365 or 366 numbers whose value is the month of the year that each day belongs to
+  n.days.febr<-ifelse(n.days.in.a.year(year)==366,29,28)
+  return(c(rep(1,31),rep(2,n.days.febr),rep(3,31),rep(4,30),rep(5,31),rep(6,30),rep(7,31),rep(8,31),rep(9,30),rep(10,31),rep(11,30),rep(12,31)))
+}
+
+rescale.max<-function(my.array,val.max){ # takes an array and returns the same array with all the elements higher than val.max lowered to val.max (useful to adjust the legends of the plots)
+  ss<-which(my.array>val.max)
+  my.array[ss]<-val.max
+  return(my.array)
+}
+rescale.min<-function(my.array,val.min){ # like rescale.max but for the values smaller than val.min, which are raised to val.min
+  ss<-which(my.array<val.min)
+  my.array[ss]<-val.min
+  return(my.array)
+}
+rescale.both<-function(my.array,val.min,val.max){ # applies both clippings at once
+  ss<-which(my.array>val.max)
+  my.array[ss]<-val.max
+  ss<-which(my.array<val.min)
+  my.array[ss]<-val.min
+  return(my.array)
+}
+
+my.boxplot.stats<-function(x, coef=1.5, do.conf=TRUE){ # local variant of grDevices::boxplot.stats: five statistics, notch limits and outliers
+  nna <- !is.na(x)
+  n <- sum(nna)
+  stats <- fivenum(x, na.rm=TRUE)
+  iqr <- diff(stats[c(2, 4)])
+  out <- x < stats[2] - coef*iqr | x > stats[4] + coef*iqr
+  conf <- if (do.conf)
+    stats[3] + c(-1.58, 1.58) * diff(stats[c(2, 4)])/sqrt(n)
+  list(stats = stats, n = n, conf = conf, out = x[out & nna])
+}
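+# Example use of the clamping helpers defined above (illustrative only): values
+# outside the bounds are flattened, which keeps map legends within a fixed range.
+if(FALSE){
+  rescale.max(c(1,5,99), 10)   # -> 1 5 10
+  rescale.min(c(-3,5,99), 0)   # -> 0 5 99
+}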
+
+########################### Conversion of the NCEP pressure data to pressure data in standard format (if enabled) #########################################
+# every time you add new months to the directory "C:\nicola\pressure\database_NCEP_Reanalysis\2) database en formado txt"
+# just run this script to create the new file in "C:/nicola/pressure/database_NCEP_Reanalysis/3) database per R/NCEP_formato_standard.txt"
+if(NCEP_to_SLP==TRUE){
+ setwd(dir_NCEP)
+ filemesi<-list.files()
+ n.filemesi=length(filemesi) # number of files over which the SLP is computed
+ nome.filemesi<-substr(filemesi,1,nchar(filemesi)-4) # strips the trailing .txt from each file name
+ anno.filemesi<-as.numeric(substr(nome.filemesi,1,4)) # extracts the year from the file name
+ mese.filemesi<-as.numeric(substr(nome.filemesi,6,7)) # extracts the month from the file name (possible because the months are always identified by 2 digits at the end of the file name)
+ standard.SLP.full<-c()
+ i=1 # counts which file we have got to
+ barraP<-winProgressBar("Analyzing data","Please wait...",0,n.filemesi,0)
+ for(x in filemesi){
+  listaNCEP<- read.table(x,stringsAsFactors=FALSE,fill=T) # reads each month of SLP data
+  ss<-which(!is.na(listaNCEP[,2])) # removes all the rows with data in both columns (they do not correspond to real data but to the number of pixels of the grid, repeated every time the grid changes)
+  listaNCEP<-listaNCEP[-ss,]
+  listaNCEP<-listaNCEP[,1] # removes all columns beyond the first; the data.frame becomes a vector
+  listaNCEP<-cbind(anno.filemesi[i],mese.filemesi[i],listaNCEP) # adds two columns on the left with the year and month of the data
+
+  # if the SLP data are taken every X hours, converts them to daily:
+  n.gg_al_mese<-dim(listaNCEP)[1]/(NCEP_ori_npx*NCEP_ver_npx*num.grid.for.day) # counts the number of days of the imported month
+
+  n.pixel<-NCEP_ori_npx*NCEP_ver_npx # counts the number of pixels of each grid
+  n.rows.for.day<-num.grid.for.day*n.pixel # counts the number of data rows for each day of the month (it coincides with dim(listaNCEP)[1]/n.gg_al_mese)
+
+  media<-c() # vector holding the daily means of the pressure grids, all in a single column
+
+  # averages the sub-daily grids to compute the daily grid:
+  for(day in 1:n.gg_al_mese){
+   # selects the grids of the same day and puts them in the vector media: (so far it only works for num.grid.for.day=4, because the sum is hard-coded over 4 terms and cannot be changed without a for loop)
+   media[(n.pixel*(day-1)+1):(n.pixel*day)]<-(listaNCEP[(n.rows.for.day*(day-1)+n.pixel*0+1):(n.rows.for.day*(day-1)+n.pixel*1),3]+
+         listaNCEP[(n.rows.for.day*(day-1)+n.pixel+1):(n.rows.for.day*(day-1)+n.pixel*2),3]+
+         listaNCEP[(n.rows.for.day*(day-1)+n.pixel*2+1):(n.rows.for.day*(day-1)+n.pixel*3),3]+
+         listaNCEP[(n.rows.for.day*(day-1)+n.pixel*3+1):(n.rows.for.day*(day-1)+n.pixel*4),3])/num.grid.for.day
+
+  } # closes the for on day that averages the daily grids
+
+  # converts the mean vector from vertical to horizontal to adapt it to the standard SLP format:
+  standard.SLP<-c() #array(NA,c(n.pixel,NCEP_ori_npx))
+  for(day in 1:n.gg_al_mese){
+   tabla.temp<-media[(n.pixel*(day-1)+1):(n.pixel*day)]
+   tabla.temp<-matrix(tabla.temp,nrow=NCEP_ver_npx,ncol=NCEP_ori_npx,byrow=T)
+   tabla.temp<-cbind(anno.filemesi[i],mese.filemesi[i],day,tabla.temp)
+   if(day>1){standard.SLP<-rbind(standard.SLP,tabla.temp)} else {standard.SLP<-tabla.temp}
+  }
+
+  if(i==1){standard.SLP.full<-standard.SLP} else {standard.SLP.full<-rbind(standard.SLP.full,standard.SLP)}
+  i<-i+1 # moves on to the next file
+  setWinProgressBar(barraP,i,label=paste(i,"/",n.filemesi) )
+ };close(barraP) # closes the for on x (moves to the next file)
+
+ write.table(standard.SLP.full,file_NCEP_standard,quote=FALSE,row.names=FALSE,col.names=FALSE)
+
+} # closes the if on NCEP_to_SLP
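+# The vertical-to-horizontal reshaping above relies on matrix(..., byrow=TRUE):
+# a single column of pixel values scanned row by row becomes one grid row per
+# latitude. Minimal illustration with a 2x3 grid (hypothetical values):
+if(FALSE){
+  v <- c(1,2,3,4,5,6)                   # pixels of one day, scanned along each latitude row
+  matrix(v, nrow=2, ncol=3, byrow=TRUE) # row 1: 1 2 3 / row 2: 4 5 6
+}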
+
+########################### Classification of the pressure data into Weather Types (if enabled) #########################################
+
+if(SLP_to_WTs==TRUE){ # in this case it first creates the daily weather-type classification from the input pressure fields:
+ standard.SLP.full<-read.table(file.SLP,stringsAsFactors=FALSE,header=FALSE,fill=TRUE) # reads all the daily SLP data
+ n.days.SLP<-dim(standard.SLP.full)[1]/npx.ver # number of days of the whole SLP data period
+ WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+
+ # if the EMULATE database was imported in the non-normalized year/month/day/grid version downloaded from the internet, the year/month/day columns must be recreated:
+ # do NOT do it with Excel, because it imports the data changing their order!!!!!
+ # sequ<-1+seq(0,dim(standard.SLP.full)[1]-1,npx.ver+1)
+ # C1<-standard.SLP.full[sequ,1]
+ # C2<-standard.SLP.full[sequ,2]
+ # C3<-standard.SLP.full[sequ,3]
+ # standard.SLP.full2<-standard.SLP.full[-sequ,]
+ # C4<-rep(C1,each=npx.ver)
+ # C5<-rep(C2,each=npx.ver)
+ # C6<-rep(C3,each=npx.ver)
+ # C.all<-cbind(C4,C5,C6)
+ # standard.SLP.full2<-cbind(C.all,standard.SLP.full2)
+ # write.table(standard.SLP.full2,file="C:/nicola/pressure/database_emulate/2) emulate_normalizado_1850-2003.txt" ,row.names=FALSE,col.names=FALSE,quote=FALSE)
+
+ for(point in 1:12){ # this for is needed only for the 12 points over Spain used by Estela
+  if(point==1){point.name<-"point_#1_coord_-7°30'E_42°30'N.txt";lon.16p=c(141,145,137,141,145,5,137,141,145,5,137,141,145,5,141,145)-1;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)-1;lat.central=42.5;lat.2nd.row=lat.central+5}
+  if(point==2){point.name<-"point_#2_coord_-5°E_42°30'N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1);lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)-1;lat.central=42.5;lat.2nd.row=lat.central+5}
+  if(point==3){point.name<-"point_#3_coord_-2°30'E_42°30'N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1)+1;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)-1;lat.central=42.5;lat.2nd.row=lat.central+5}
+  if(point==4){point.name<-"point_#4_coord_0°E_42°30'N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1)+2;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)-1;lat.central=42.5;lat.2nd.row=lat.central+5}
+  if(point==5){point.name<-"point_#5_coord_2°30'E_42°30'N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1)+3;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)-1;lat.central=42.5;lat.2nd.row=lat.central+5}
+  if(point==6){point.name<-"point_#6_coord_-7°30'E_40°N.txt";lon.16p=c(141,145,137,141,145,5,137,141,145,5,137,141,145,5,141,145)-1;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25);lat.central=40;lat.2nd.row=lat.central+5}
+  if(point==7){point.name<-"point_#7_coord_-5°E_40°N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1);lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25);lat.central=40;lat.2nd.row=lat.central+5}
+  if(point==8){point.name<-"point_#8_coord_-2°30'E_40°N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1)+1;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25);lat.central=40;lat.2nd.row=lat.central+5}
+  if(point==9){point.name<-"point_#9_coord_0°E_40°N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1)+2;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25);lat.central=40;lat.2nd.row=lat.central+5}
+  if(point==10){point.name<-"point_#10_coord_-7°30'E_37°30'N.txt";lon.16p=c(141,145,137,141,145,5,137,141,145,5,137,141,145,5,141,145)-1;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)+1;lat.central=37.5;lat.2nd.row=lat.central+5}
+  if(point==11){point.name<-"point_#11_coord_-5°E_37°30'N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1);lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)+1;lat.central=37.5;lat.2nd.row=lat.central+5}
+  if(point==12){point.name<-"point_#12_coord_-2°30'E_37°30'N.txt";lon.16p=c(141,1,137,141,1,5,137,141,1,5,137,141,1,5,141,1)+1;lat.16p=c(17,17,19,19,19,19,21,21,21,21,23,23,23,23,25,25)+1;lat.central=37.5;lat.2nd.row=lat.central+5}
+
+  # extracts only the daily pressure series of the 16 pixels chosen by the user and puts them in the 16 columns of SLP16:
+  for(i in 1:16){ # loops over the 16 points
+   # selects the year/month/day data for every day of data:
+   seq.days<-lat.16p[i]+seq(0,npx.ver*(n.days.SLP-1),npx.ver)
+   #standard.SLP.full[lat.16p[i]+npx.ver*0,c(1:3,3+lon.16p[i])]
+   #standard.SLP.full[lat.16p[i]+npx.ver*1,c(1:3,3+lon.16p[i])]
+   if(i==1){SLP16<-standard.SLP.full[seq.days,c(1:3,3+lon.16p[i])]} else {SLP16<-cbind(SLP16,standard.SLP.full[seq.days,3+lon.16p[i]])}
+  }
+  names(SLP16)<-c("Year","Month","Day","P1","P2","P3","P4","P5","P6","P7","P8","P9","P10","P11","P12","P13","P14","P15","P16")
+
+  #lat.2nd.row=lat.central+5 # already defined in the header
+  lat.4rd.row=lat.central-5
+
+  # computes the coefficients of the geostrophic indices (beware: do not round them to the third decimal as Ricardo or Martin-Vide do, because that introduces an error that changes the WT in 3-4% of the cases!!):
+  SF1<-1/cos(lat.central*pi/180)
+  ZS1<-1/(2*cos(lat.central*pi/180)^2)
+  ZW1<-sin(lat.central*pi/180)/sin(lat.4rd.row*pi/180)
+  ZW2<-sin(lat.central*pi/180)/sin(lat.2nd.row*pi/180)
+
+  # computes the geostrophic indices:
+  SF<-WF<-F<-D<-D2<-D3<-c()
+  SF=SF1*(0.25*(SLP16[,"P5"]+2*SLP16[,"P9"]+SLP16[,"P13"])-0.25*(SLP16[,"P4"]+2*SLP16[,"P8"]+SLP16[,"P12"]))
+  WF=0.5*(SLP16[,"P12"]+SLP16[,"P13"])-0.5*(SLP16[,"P4"]+SLP16[,"P5"])
+  F=(SF*SF+WF*WF)^0.5
+  D=(180/pi)*atan(WF/SF) # D is the flow direction; since atan returns the angle in radians, it must be converted to degrees by multiplying by (180/pi)
+
+  #SF2<-WF2<-F2<-D2<-D22<-D32<-c()
+  #SF2=SF1*(0.25*(SLP16[,"P5"]+2*SLP16[,"P9"]+SLP16[,"P13"])-0.25*(SLP16[,"P4"]+2*SLP16[,"P8"]+SLP16[,"P12"]))
+  #WF2=0.5*(SLP16[,"P12"]+SLP16[,"P13"])-0.5*(SLP16[,"P4"]+SLP16[,"P5"])
+  #F2=(SF*SF+WF*WF)^0.5
+  #D2=(180/pi)*atan(WF/SF)
+
+  # transforms the initial angle D into an always-positive angle D2, measured clockwise from North:
+  # transformation by Sergio Vicente Serrano:
+  quad1<-which(WF<0 & SF<=0) # finds which angles D belong to quadrant 1, which to quadrant 2, which to 3 and which to 4.
+  quad2<-which(WF<0 & SF>0) # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left.
+  quad3<-which(WF>0 & SF>0)
+  quad4<-which(WF>0 & SF<=0)
+
+  D2<-D
+  D2[quad2]<-D[quad2]+180
+  D2[quad3]<-D[quad3]+180
+  D2[quad4]<-D[quad4]+360
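+  # Worked example (illustrative only): atan(WF/SF) only spans (-90°,90°), so the
+  # quadrant correction above is what produces a full 0-360° bearing measured
+  # clockwise from North. E.g. WF=-1, SF=1 falls in quadrant 2 and
+  # D = (180/pi)*atan(-1/1) = -45°, hence D2 = -45 + 180 = 135°.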
+
+  # identifies the directional WTs:
+  NE<-which(D2>=22.5 & D2<67.5)
+  E<-which(D2>=67.5 & D2<112.5)
+  SE<-which(D2>=112.5 & D2<157.5)
+  S<-which(D2>=157.5 & D2<202.5)
+  SW<-which(D2>=202.5 & D2<247.5)
+  W<-which(D2>=247.5 & D2<292.5)
+  NW<-which(D2>=292.5 & D2<337.5)
+  N<-which(D2>=337.5 | D2<22.5)
+
+  # adds a column on the right with the directional WT associated with the angle:
+  D2.dir<-rep("-999",length(D2))
+  D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N"
+
+  D3<-as.data.frame(cbind(SLP16[,1:3],D2,D2.dir),stringsAsFactors=FALSE) # appends the column to the right of D2
+  D3$Year<-as.numeric(D3$Year);D3$Month<-as.numeric(D3$Month);D3$Day<-as.numeric(D3$Day);D3$D2<-as.numeric(D3$D2);D3$D2.dir<-as.character(D3$D2.dir)
+
+  # computes the vorticity components:
+  ZS<-ZW<-Z<-c()
+  ZS=ZS1*(0.25*(SLP16[,"P6"]+2*SLP16[,"P10"]+SLP16[,"P14"])-0.25*(SLP16[,"P5"]+2*SLP16[,"P9"]+SLP16[,"P13"])-0.25*(SLP16[,"P4"]+2*SLP16[,"P8"]+SLP16[,"P12"])+0.25*(SLP16[,"P3"]+2*SLP16[,"P7"]+SLP16[,"P11"]))
+  ZW=ZW1*(0.5*(SLP16[,"P15"]+SLP16[,"P16"])-0.5*(SLP16[,"P8"]+SLP16[,"P9"]))-ZW2*(0.5*(SLP16[,"P8"]+SLP16[,"P9"])-0.5*(SLP16[,"P1"]+SLP16[,"P2"]))
+  Z=ZS+ZW
+
+  cyc.pure<-which(Z>2*F)
+  anticyc.pure<-which(Z<(-2*F))
+  hybrid.cyc<-which(abs(Z)<2*F & abs(Z)>F & Z>0)
+  hybrid.anticyc<-which(abs(Z)<2*F & abs(Z)>F & Z<0)
+  indeter<-which(F<6 & abs(Z)<6) # indeterminate (unclassified) weather type (U): both flow and vorticity are weak
+
+  # column identifying which days are of hybrid type and which of directional type:
+  D3$Hyb<-rep("Directional",length(D2))
+  D3$Hyb[cyc.pure]<-"Pure C"
+  D3$Hyb[anticyc.pure]<-"Pure A"
+  D3$Hyb[hybrid.cyc]<-"Hybrid C"
+  D3$Hyb[hybrid.anticyc]<-"Hybrid A"
+
+  # column with the list of the 26 official WTs:
+  D3$WT<-D3$D2.dir
+  D3$WT[cyc.pure]<-"C"
+  D3$WT[anticyc.pure]<-"A"
+  D3$WT[hybrid.cyc]<-paste("C.",D3$D2.dir[hybrid.cyc],sep="")
+  D3$WT[hybrid.anticyc]<-paste("A.",D3$D2.dir[hybrid.anticyc],sep="")
+  #D3$WT[indeter]<-"U"
+
+  D3$WT.num<-match(D3$WT,WTs.type) # adds a column converting the 26 weather types to numbers from 1 to 26
+
+  # column with the list of the 10 WTs obtained by keeping C, A and the eight directional types; every other hybrid WT is merged into its corresponding directional type (e.g. C.SW is added to SW, etc.)
+  D3$WT10<-D3$D2.dir
+  D3$WT10[cyc.pure]<-"C"
+  D3$WT10[anticyc.pure]<-"A"
+  D3$WT10.num<-match(D3$WT10,WTs.type) # adds the last column converting the 10 weather types to the numbers 1 to 9, plus type A, which becomes number 18
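+  # Worked example of the hybrid/pure rules above (illustrative only): a day with
+  # F=10 and Z=25 has Z>2F, so it is pure cyclonic "C"; with Z=15 (F<Z<2F) it is a
+  # hybrid "C." type; with Z=5 (|Z|<F) the direction alone decides the type.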
+
+  #write.table(D3[,c("Year","Month","Day","WT")],file="C:/nicola/WTs_from_Emulate.txt",row.names=FALSE,col.names=FALSE,quote=FALSE)
+  #write.table(D3[,c("Year","Month","Day","WT10.num")],file="C:/nicola/NCEP_10WTs.txt",row.names=FALSE,col.names=FALSE,quote=FALSE)
+  #write.table(D3[,c("Year","Month","Day","WT.num")],file=output.WTs,row.names=FALSE,col.names=FALSE,quote=FALSE)
+  write.table(D3[,c("Year","Month","Day","WT")],file="C:/nicola/tmp.txt",row.names=FALSE,col.names=FALSE,quote=FALSE)
+  #write.table(D3[,c("Year","Month","Day","WT")],file=paste("C:\\nicola\\precipitaciones\\weather_types\\database WTs NCAR for Spain\\",point.name,sep=""),row.names=FALSE,col.names=FALSE,quote=FALSE)
+
+  # To also save the geostrophic indices:
+  write.table(cbind(D3[,c("Year","Month","Day","WT")],D=round(D,3),D2=round(D2,3),F=round(F,3),Z=round(Z,3)),file=paste("C:\\nicola\\precipitaciones\\weather_types\\database WTs NCAR for Spain\\","Torino2.txt",sep=""),row.names=FALSE,col.names=TRUE,quote=FALSE)
+
+  # To also save the geostrophic indices for Dhais:
+  write.table(cbind(D3[,c("Year","Month","Day","WT")],SF=round(SF,5),WF=round(WF,5),F=round(F,5),D=round(D2,5),ZS=round(ZS,5),ZW=round(ZW,5),Z=round(Z,5)),file=paste("C:\\nicola\\precipitaciones\\weather_types\\database WTs NCAR for Spain\\","WTs NCEP 1970-2013.txt",sep=""),row.names=FALSE,col.names=TRUE,quote=FALSE)
+
+ } # closes the for on point
+
+ ### displays the mean pressure-field patterns associated with the computed WTs:
+ # but it does not work because there is a problem with proj4string(grd)
+ npx.ori<-dim(standard.SLP.full)[2]-3
+ listaWT.used<-D3[,c("Year","Month","Day","WT")]
+ idWT<-sort(unique(listaWT.used[[4]])) # this way you can also handle numeric but non-integer weather types
+ nWT=length(unique(listaWT.used[[4]])) # number of weather types of the classification (e.g. 8, 9 or 10)
+ WorldPoly<- readShapePoly('C:/Projecto_Master/Limites Mundo/TM_WORLD_BORDERS-0.2.shp')
+ proj4string(WorldPoly) <- CRS("+proj=longlat +datum=WGS84")
+ my.polygon=list('sp.polygons',WorldPoly,first=F)
+ my.layout=list(my.polygon)
+ gt = GridTopology(cellcentre.offset = angolo.basso.sinistra, cellsize = c(deg,deg), cells.dim = c(npx.ori, npx.ver))
+ grd = SpatialGrid(gt)
+ proj4string(grd) <- CRS("+proj=longlat +datum=WGS84")
+ my.names<-nome.mese
+ my.cuts<-c(0,0.01,0.15,0.3,0.5,1)
+ #my.cuts<-c(0.1,3,10,20,100)^(1/3)
+ my.labels<-c("1%","15%","30%","50%","100%")
+ #my.labels<-c("1 mm","3%","10%","20%","100%")
+ my.cuts.in.leyend<-c(0.01,0.15,0.30,0.50,1)
+ my.colors<-c("transparent","#00FFFF","#0099F8","blue","purple")
+
+ for(wt in WTs.type){ # loops over all the WTs
+  #e.g.: wt="NE" # choose the weather type to map if you skip the for loop
+  wt.num<-which(WTs.type==wt) # converts the wt expressed as letters into a number from 1 to 26
+
+  # uses the daily WT classification chosen at the beginning together with the daily pressure fields of the file.SLP database (in standard format) from which
+  # the classification in use was downscaled, and turns them into maps of the mean pressure associated with each WT over the whole study area of the pressure database:
+  datos.SLP2<-standard.SLP.full # daily pressure database in the format year/month/day/pixel pressure values in columns
+  datos.SLP2$WT<-rep(listaWT.used[[4]],each=npx.ver) # appends to the right of datos.SLP2 a column with the corresponding weather type
+  pressure.WT.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,npx.ori*npx.ver)), proj4string = CRS("+proj=longlat +datum=WGS84"))
+
+  datos.temp<-datos.SLP2
+  datos.temp<-subset(datos.temp,datos.temp$WT==idWT[wt.num])
+  n.dias<-dim(datos.temp)[1]/npx.ver
+  mean.pressure<-data.frame(array(0,c(npx.ver,npx.ori)))
+  for(d in 1:n.dias){
+   mean.pressure<-mean.pressure+datos.temp[(npx.ver*(d-1)+1):(npx.ver*d),4:(3+npx.ori)]
+  }
+  mean.pressure<-mean.pressure/n.dias # to obtain the mean instead of the sum
+  # loads all the computed means into pressure.grid:
+  pressure.WT.grid@data[[WTs.type[wt.num]]]=as.numeric(t(mean.pressure))
+
+  # pressure fields of the given WT without a coloured grid underneath, only blue contour lines:
+  my.main=list(label=WTs.names.long[wt.num],cex=1.1)
+  spplot(pressure.WT.grid,wt,sp.layout=my.layout,contour=TRUE,col='blue',labels=F,region=F, main=my.main,xlim=c(-20,10),ylim=c(30,50)) # pressure-field map
+
+  # centered on Torino:
+  spplot(pressure.WT.grid,wt,sp.layout=my.layout,contour=TRUE,col='blue',labels=F, cuts=30,lwd=1.5,col.regions=rev(jet.colors(1000)),main=my.main,xlim=c(-7.5,22.5),ylim=c(35,55)) # pressure-field map
+
+  # pressure fields of the given WT with a red and blue grid and white contour lines:
+  my.WT.map<-spplot(pressure.WT.grid,wt,sp.layout=my.layout,contour=TRUE,col='gray',region=FALSE,labels=FALSE,
+      cuts=30,lwd=1.5,col.regions=rev(jet.colors(1000)),main=my.main,xlim=c(-35,25),ylim=c(25,60)) # colorkey=list(space = "bottom"),cuts=30,
+  # pressure fields with a pink-white-light blue grid and gray contour lines:
+  my.WT.map<-spplot(pressure.WT.grid,wt,sp.layout=my.layout,contour=TRUE,col='gray',labels=F,
+      colorkey=list(space = "bottom"),main=my.main,xlim=c(-20,10),ylim=c(30,50)) # pressure-field map of the given WT
+ }
+
+ # # comparison between Alex's Ensemble WTs and Nic's Ensemble WTs:
+ # nic<-read.table("C:/nicola/precipitaciones/weather_types/26 WTs from begin year to end/WTs_from_EMULATE_nic.txt",stringsAsFactors=FALSE,header=FALSE) # reads all the daily WT data
+ # nic[,5]<-nic[,4]
+ # number.WTs.nic<-c(1:26)
+ # for(i in 1:26)nic[,5]<-replace(nic[,5],which(nic[,5]==number.WTs.nic[i]),WTs.type[i])
+ # alex<-read.table("C:/nicola/precipitaciones/weather_types/26 WTs from begin year to end/EMULATE Project.txt",stringsAsFactors=FALSE,header=FALSE) # reads all the daily WT data
+ # alex[,5]<-alex[,4]
+ # number.WTs.alex<-c(1,2,3,4,5,6,7,8,30,31.5,32.5,33.5,34.5,35.5,36.5,37.5,38.5,40,41.5,42.5,43.5,44.5,45.5,46.5,47.5,48.5)
+ # # remember that WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+ # for(i in 1:26)alex[,5]<-replace(alex[,5],which(alex[,5]==number.WTs.alex[i]),WTs.type[i])
+ # alex[,6]<-nic[,5]
+ # alex[,7]<-D
+ # alex[,8]<-D2
+ # alex[1:200,]
+ # save.image(paste(dir_root,"/RData/125_cfr_downscaling_Alex_y_Nic.RData",sep=""))
+}
+
+################################## IMPORT THE PREC INFOFILE AND THE PREC DATA ##############################################################################################
+ # imports the Excel file with the list of all the available stations:
+infofile <- read.xls(dir_infofile,verbose=TRUE,fill=TRUE,header=T,skip=0,na.strings="-999",perl=perlexe,stringsAsFactors=FALSE)
+names(infofile)<-c("CODIGO","NOMBRE","PROVINCIA","LATITUDE","LONGITUDE","ALTITUDE")
+
+infofile<-subset(infofile,infofile[[1]]!="") # removes any empty rows
+ss<-which(substring(infofile[[2]],1,1)==" ") # removes the leading space from the names of the stations that begin with a space " "
+for (i in ss)infofile[[2]][i]=substring(infofile[[2]][i],2,nchar(infofile[[2]][i]))
+tt<-which(substring(infofile[[2]],1,1)==".") # removes the leading character from the names of the stations that begin with a dot "."
+for (i in tt)infofile[[2]][i]=substring(infofile[[2]][i],2,nchar(infofile[[2]][i]))
+
+#infofile<-infofile[order(infofile[[2]],infofile[[3]],infofile[[4]]),] # sorts by station name, province and year
+infofile<-infofile[order(infofile[[1]]),] # sorts by CODIGO (if not already sorted)
+staz.infofile<-infofile[[1]] # list of the infofile station codes in alphanumeric order
+n.staz.infofile<-dim(infofile)[1]
+
+# imports the data of all the listed stations and converts the -999 missing values to NA:
+# (this time there is no need for a for loop over each station in the list, because all and only the data of the listed stations are assumed to be in the same .txt data file):
+separator="";if(substring(dir_datos,nchar(dir_datos),nchar(dir_datos))=="v")separator=";" # only if you import a .csv
+dati<-read.table(dir_datos,header=header_datos,stringsAsFactors=FALSE,na.strings="-999",sep=separator)
+names(dati)<-c("Codigo","Year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+
+# convert monthly data to seasonal data:
+if(seasons==TRUE){
+ dati.seasonal=dati
+ if(predictand=="TEMP"){mod.pred=3}else{mod.pred=1} # in case of temperature we need the mean of the monthly values, not the cumulated value as for the precipitation
+ dati.seasonal[,3]=(dati[,11]+dati[,12]+dati[,13])/mod.pred # Autumn (Sep+Oct+Nov)
+ dati.seasonal[,4]=(dati[,3]+dati[,4]+dati[,14])/mod.pred # Winter (Jan+Feb+Dec)
+ dati.seasonal[,5]=(dati[,5]+dati[,6]+dati[,7])/mod.pred # Spring (Mar+Apr+May)
+ dati.seasonal[,6]=(dati[,8]+dati[,9]+dati[,10])/mod.pred # Summer (Jun+Jul+Aug)
+ names(dati.seasonal)<-c("Codigo","Year",name.seasons)
+ dati<-dati.seasonal
+ remove(dati.seasonal)
+ # from here on the algorithm for seasonal data is not implemented yet; for example, the weather types still need to be converted from monthly to seasonal
+}
+
+# PRECIPITATION DATA ADJUSTMENTS:
+# subtracts one from the precipitation of each month, because 1 had been added to all values in the database to avoid months with zero: (it works even if there are NAs)
+if(piu_1==TRUE){for(m in 1:n.mesi)dati[[2+m]]<-dati[[2+m]]-1 }
+# if there are values equal to -1 (old zeros that should have been set to 1), sets them to zero:
+for(m in 1:n.mesi){ss<-which(dati[[2+m]]==-1);dati[[2+m]][ss]=0}
+# removes the year 2006 from the precipitation of every series, because in the MOPREDAS database it always appears with no data:
+if(remove_2006==TRUE)dati<-subset(dati,dati[[2]]!=2006)
+# if the data are expressed in tenths of mm, divides the precipitation by 10:
+if(dec_mm==T)dati[,3:(2+n.mesi)]=dati[,3:(2+n.mesi)]/10
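+# Note on the monthly-to-seasonal conversion above: the monthly columns are added
+# element-wise (one value per station-year row); wrapping them in sum() would
+# collapse the whole column to a single scalar. Tiny illustration (hypothetical values):
+if(FALSE){
+  sep<-c(10,20); oct<-c(30,40); nov<-c(50,60)
+  (sep+oct+nov)        # per-row seasonal totals: 90 120
+  sum(sep+oct+nov)     # a single scalar (210), not what is wanted here
+}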
+
+dati<-dati[order(dati[[1]],dati[[2]]),] # sorts by codigo and year
+staz.dati<-unique(dati[[1]]) # unique removes the codes that are repeated for every year in the data file
+n.staz.dati<-length(staz.dati)
+
+# removes the data of the series that are not in the infofile (comparing the codes):
+# (this is normal: the data file usually contains the whole available database, while the infofile lists only the stations to work with)
+dati<-subset(dati,dati[[1]] %in% staz.infofile) # i.e. keeps the rows whose code belongs to the list of codes in staz.infofile!
+
+# final number and list of stations after the selection: (it normally coincides with the number of stations of the infofile, but it may be smaller if some infofile stations are missing from the data)
+listaz<-unique(dati[[1]]);n.staz<-length(listaz)
+if(n.staz!=n.staz.infofile)stop("Warning: some series of the infofile are not present in the data file")
+
+# now the remaining stations must be formatted, i.e. made to all start and end with the same years:
+min.assoluto<-min(dati[[2]])
+max.assoluto<-max(dati[[2]])
+for(st in listaz){
+ st.dati<-subset(dati,dati[[1]]==st)
+ st.min<-min(st.dati[[2]])
+ st.max<-max(st.dati[[2]])
+ if(st.min>min.assoluto){ # if the station starts later than usual, rows of NA must be added before it
+  n.years.new<-st.min-min.assoluto # counts how many missing-data years must be added
+  st.new<-as.data.frame(cbind(rep(st,n.years.new),c(min.assoluto:(st.min-1)),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new)),stringsAsFactors=FALSE)
+  st.new[[2]]<-as.numeric(st.new[[2]]) # turns the years back into numbers, because they were converted to char when creating the data.frame
+  dimnames(st.new)[[2]]<-c("Codigo","Year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+  dati<-rbind(dati,st.new) # rows of missing data
+ }
+ if(st.max<max.assoluto){ # if the station ends earlier than usual, rows of NA must be added after it
+  n.years.new<-max.assoluto-st.max
+  st.new<-as.data.frame(cbind(rep(st,n.years.new),c((st.max+1):max.assoluto),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new),rep(NA,n.years.new)),stringsAsFactors=FALSE)
+  st.new[[2]]<-as.numeric(st.new[[2]])
+  dimnames(st.new)[[2]]<-c("Codigo","Year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+  dati<-rbind(dati,st.new) # rows of missing data
+ }
+}
+
+begin.month[cla]=listaWT[[cla]][[2]][1]
+begin.day[cla]=listaWT[[cla]][[3]][1]
+begin.year[cla]=listaWT[[cla]][[1]][1]
+if(begin.month[cla]>1 | begin.day[cla]>1)begin.year[cla]<-begin.year[cla]+1 # if the first year does not have all its data, it starts from the following year (e.g. the Cost database starts on 1/9/1957)
+
+end.month[cla]=listaWT[[cla]][[2]][length(listaWT[[cla]][[1]])]
+end.day[cla]=listaWT[[cla]][[3]][length(listaWT[[cla]][[1]])]
+end.year[cla]=listaWT[[cla]][[1]][length(listaWT[[cla]][[1]])]
+if(end.month[cla]<12 | end.day[cla]<31)end.year[cla]<-end.year[cla]-1 # if the last year does not have all its data, it ends with the previous year
+
+begin.month[cla.rif]=listaWT[[cla.rif]][[2]][1]
+begin.day[cla.rif]=listaWT[[cla.rif]][[3]][1]
+begin.year[cla.rif]=listaWT[[cla.rif]][[1]][1]
+if(begin.month[cla.rif]>1 | begin.day[cla.rif]>1)begin.year[cla.rif]<-begin.year[cla.rif]+1 # if the first year does not have all its data, it starts from the following year (e.g. the Cost database starts on 1/9/1957)
+
+end.month[cla.rif]=listaWT[[cla.rif]][[2]][length(listaWT[[cla.rif]][[1]])]
+end.day[cla.rif]=listaWT[[cla.rif]][[3]][length(listaWT[[cla.rif]][[1]])]
+end.year[cla.rif]=listaWT[[cla.rif]][[1]][length(listaWT[[cla.rif]][[1]])]
+if(end.month[cla.rif]<12 | end.day[cla.rif]<31)end.year[cla.rif]<-end.year[cla.rif]-1 # if the last year does not have all its data, it ends with the previous year
+
+for(mes in 1:12){
+ if(mes>1){temp2<-rbind(temp2,cbind(temp,mes))} else {temp2<-cbind(temp,mes)}
+}
+temp2<-temp2[,c(1,7,2:6)] # moves the column with the months to the second position
+monthlyWT[[cla.rif]]<-temp2[order(temp2[,1],temp2[,2]),] # sorts by the year and month columns so that it matches the monthlyWT format
+
+# recalculate monthlyWTs if they are 10:
+if(n.weather.types==10){
+ monthlyWT[[cla.rif]][,3]=monthlyWT[[cla.rif]][,3]+monthlyWT[[cla.rif]][,12]+monthlyWT[[cla.rif]][,21] # NE
+ monthlyWT[[cla.rif]][,4]=monthlyWT[[cla.rif]][,4]+monthlyWT[[cla.rif]][,13]+monthlyWT[[cla.rif]][,22] # E
+ monthlyWT[[cla.rif]][,5]=monthlyWT[[cla.rif]][,5]+monthlyWT[[cla.rif]][,14]+monthlyWT[[cla.rif]][,23] # SE
+ monthlyWT[[cla.rif]][,6]=monthlyWT[[cla.rif]][,6]+monthlyWT[[cla.rif]][,15]+monthlyWT[[cla.rif]][,24] # S
+ monthlyWT[[cla.rif]][,7]=monthlyWT[[cla.rif]][,7]+monthlyWT[[cla.rif]][,16]+monthlyWT[[cla.rif]][,25] # SW
+ monthlyWT[[cla.rif]][,8]=monthlyWT[[cla.rif]][,8]+monthlyWT[[cla.rif]][,17]+monthlyWT[[cla.rif]][,26] # W
+ monthlyWT[[cla.rif]][,9]=monthlyWT[[cla.rif]][,9]+monthlyWT[[cla.rif]][,18]+monthlyWT[[cla.rif]][,27] # NW
+ monthlyWT[[cla.rif]][,10]=monthlyWT[[cla.rif]][,10]+monthlyWT[[cla.rif]][,19]+monthlyWT[[cla.rif]][,28] # N
+ nWT[cla.rif]=10
+
+ tmp<-as.matrix(cbind(monthlyWT[[cla.rif]][,1],monthlyWT[[cla.rif]][,2],monthlyWT[[cla.rif]][,3],monthlyWT[[cla.rif]][,4],monthlyWT[[cla.rif]][,5],monthlyWT[[cla.rif]][,6],monthlyWT[[cla.rif]][,7],monthlyWT[[cla.rif]][,8],monthlyWT[[cla.rif]][,9],monthlyWT[[cla.rif]][,10],monthlyWT[[cla.rif]][,11],monthlyWT[[cla.rif]][,20]))
+
+ monthlyWT[[cla.rif]]<-tmp
+ remove(tmp)
+}
+
+
+# Computation of the monthly WT frequencies for two different periods:
+
+A<-list() # A is a list of monthly lists of the WT matrices for the reference classification (one month per matrix); format: year/month/WT1/WT2/...
+AN<-list() # like A, but it starts and ends in a user-chosen period used for the WT analyses; the frequencies are not yet in %, but in number of days per month
+for(cla in cla.rif){ #2:n.classi){ # classification no.1 is temporarily excluded because it starts after 1850
+ A[[cla]]<-list();AN[[cla]]<-list()
+ for(mese in 1:12)A[[cla]][[mese]]<-subset(monthlyWT[[cla]],monthlyWT[[cla]][,2]==mese)
+ for(mese in 1:12)AN[[cla]][[mese]]<-A[[cla]][[mese]][A[[cla]][[mese]][,1]>=begin.analisi & A[[cla]][[mese]][,1]<=end.analisi,]
+}
+
+# Computes, over the period set above, the monthly mean of the number of days belonging to each WT:
+AN.day.mean<-AN.mean<-AN.msd<-array(NA,c(n.classi,nWT[cla.rif],12),dimnames=list(classi,WTs.type,nome.mese)) # msd is the standard deviation of the mean (= st.dev/sqrt(n.obs))
+for(cla in cla.rif){ # 2:n.classi){ classification no.1 is temporarily excluded because it starts after 1850
+for(wt in 1:nWT[cla]){
+for(mese in 1:12){
+ AN.day.mean[cla,wt,mese]<-mean(AN[[cla]][[mese]][,2+wt]) # mean frequency of the monthly WTs, still expressed as number of days per month, not as %
+ temp<-AN[[cla]][[mese]][,2+wt] # i.e. temp<-AN.day.mean[cla,wt,mese]
+ AN.mean[cla,wt,mese]<-100*mean(temp)/lastday(mese,1) # the denominator converts to the frequency in %, i.e. from the mean number of monthly events to the mean % frequency of the number of days of that WT
+ AN.msd[cla,wt,mese]<-(100*sd(temp)/lastday(mese,1))/(end.analisi-begin.analisi+1)^0.5
+}}}
+#plot(AN.day.mean[cla,26,],type="b")
+
+# computes the frequency trend for each month and WT, and its p-value with the Mann-Kendall test:
+trend<-p.val<-mean.fr<-Z<-array(NA,c(12,nWT[cla.rif]),dimnames=list(nome.mese,WTs.names[[1]]))
+for(wt in 1:nWT[cla.rif]){
+ for(mese in 1:12){ #int.mesi){
+  x<-begin.analisi:end.analisi
+  y<-AN[[cla]][[mese]][,2+wt]
+
+  reg<-lm(y~x) # linear fit of the points
+  slope<-reg$coefficients[2] # extracts the slope of the regression line
+  trend[mese,wt]<-slope*length(x) # computes the trend over the chosen period
+  suppressWarnings(k<-Kendall(x,y)) # note: the warning "Error.exit, tauk2=FAULT" appears because Kendall(x,y) returns 1 when y is a vector of zeros
+  p.val[mese,wt]<-k$sl[1] # extracts the two-tailed p-value
+
+  S=k$S[1] # Kendall's S statistic
+  VS=k$varS[1] # variance of S
+  if(S>0)Z[mese,wt]=(S-1)/(VS^0.5);if(S==0)Z[mese,wt]=0;if(S<0)Z[mese,wt]=(S+1)/(VS^0.5) # Z statistic of the Mann-Kendall test (standardized S with continuity correction)
+
+  mean.fr[mese,wt]<-AN.day.mean[cla.rif,wt,mese]
+ }
+}
+
+p.val<-t(round(p.val,2)) # shows only two decimal digits of the p-value
+trend<-t(round(trend,2)) # trend of the frequency (expressed as number of days per month of the given WT) over the whole chosen period
+mean.fr<-t(round(mean.fr,2));Z<-t(round(Z,3)) # expressed in number of days per month
+#t(round(mean.contribute,2)) # beware: mean.contribute is only computed at the very end of the script; it is mentioned here only in case you want to display it together with mean.fr!
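+# Worked example of the continuity-corrected Z above (illustrative only):
+# with S=10 and varS=16, Z=(10-1)/sqrt(16)=2.25; a two-sided p-value then follows
+# as 2*pnorm(abs(Z), lower.tail=FALSE), about 0.024.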
+
+if(altro==TRUE){
+
+# multicollinearity analysis (correlations between WTs) over the reference period:
+multic<-c()
+for(mese in 1:12){
+ cor.threshold=0.3 # minimum correlation between two WTs for multicollinearity to be flagged
+ C<-round(cor(AN[[cla.rif]][[mese]][,-c(1,2)],AN[[cla.rif]][[mese]][,-c(1,2)]),2) # drops from AN the first two columns with the year and the constant term
+ dimnames(C)<-list(WTs.type,WTs.type)
+ C.max<-which(C>cor.threshold & C<1,arr.ind=T)
+ C.cor<-cbind(rep(mese,dim(C.max)[1]),dimnames(C)[[1]][C.max[,1]], dimnames(C)[[1]][C.max[,2]],C[C.max])
+ n.rig<-dim(C.cor)[1]
+ if(n.rig>=2){
+  ripetuto<-rep(FALSE,n.rig)
+  for(i in 2:n.rig){
+   for(j in 1:i){
+    if(C.max[j,1]==C.max[i,2] & C.max[j,2]==C.max[i,1])ripetuto[i]<-TRUE
+   }
+  }
+  C.cor<-C.cor[ripetuto==FALSE,] # keeps only the rows that are not duplicated
+  multic<-rbind(multic,C.cor)
+ }
+ row.names(multic)<-NULL
+}
+multic<-multic[order(multic[,4],decreasing=TRUE),]
+multic<-multic[,c(4,1,2,3)] # moves the column with the correlation to the first position
+dimnames(multic)<-list(NULL,c("correl","mese","WT1","WT2"))
+
+##### compares the monthly frequencies of two periods of choice:
+p1.start=1945
+p1.end=1974
+p2.start=1975
+p2.end=2003
+
+p1.length=p1.end-p1.start+1
+p2.length=p2.end-p2.start+1
+
+colnames(monthlyWT[[cla.rif]])<-c("year","month",WTs.type)
+monWT<-data.table(monthlyWT[[cla.rif]],key="year,month")
+
+monWT$period<-NA
+rows1<-which(monWT$year>=p1.start & monWT$year<=p1.end)
+rows2<-which(monWT$year>=p2.start & monWT$year<=p2.end)
+monWT$period[rows1]<-1
+monWT$period[rows2]<-2
+
+setkey(monWT,period)
+monWT2<-monWT[,lapply(.SD,sum),by=list(month,period),.SDcols=3:28]
+monWT2[period==1]
+monWT2[period==2]
+# the first method can only be applied when:
+monWT2[period==1,3:28,with=F]>5 & monWT2[period==2,3:28,with=F]>5
+temp<-monWT2[period==1]>5 & monWT2[period==2]>5
+ss<-which(temp==F)
+
+n1<-(30.4*p1.length) # mean number of days per month; it is not very precise; fix it in the future!
+n2<-(30.4*p2.length)
+p1<-monWT2[period==1]/n1
+p2<-monWT2[period==2]/n2
+p<-(monWT2[period==1]+monWT2[period==2])/(n1+n2)
+options(scipen=999) # suppresses the scientific (exponential) notation of numbers
+
+# method valid when n1*p1>5 and n2*p2>5 and n1>=50 and n2>=50 (p1=x1/n1)
+z<-abs(p1-p2)/(p*(1-p)*((1/n1)+(1/n2)))^0.5
+z<-as.matrix(z)
+z<-pnorm(z,lower.tail=F) # computes the p-values given z (we do not multiply by two because we already know whether the frequency increased or decreased, i.e. we already know which of the two tails we are testing)
+z[ss]<-NA
+z<-z[,-c(1,2)] # drops the first two columns
+z<-round(z,2)
+z
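+# Worked example of the two-proportion z-test above (illustrative only):
+# p1=0.20, p2=0.30, n1=n2=900 and pooled p=0.25 give
+# z = 0.10/sqrt(0.25*0.75*(2/900)) = 4.9, i.e. a one-tailed p-value far below 0.001.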
+
+# method based on the arcsine transformation, valid for n1*p1>=1 and n2>=25 {z<-abs(asin((p1)^0.5)-asin((p2)^0.5))/(28.648*((1/n1)+(1/n2))^0.5)}
+z2<-abs(p1-p2)/((p1*(1-p1)/n1)+(p2*(1-p2)/n2))^0.5
+z2<-as.matrix(z2)
+z2<-pnorm(z2,lower.tail=F) # computes the p-values given z2
+temp2<-monWT2[period==1]>=1
+ss2<-which(temp2==F)
+z2[ss2]<-NA
+z2<-z2[,-c(1,2)] # drops the first two columns
+z2<-round(z2,2)
+z2
+
+# new WT classification in 10 types:
+monWT3<-monWT2[,c("month","period","C","A"),with=F]
+monWT3$NE<-monWT2$NE+monWT2$C.NE+monWT2$A.NE
+monWT3$E<-monWT2$E+monWT2$C.E+monWT2$A.E
+monWT3$SE<-monWT2$SE+monWT2$C.SE+monWT2$A.SE
+monWT3$S<-monWT2$S+monWT2$C.S+monWT2$A.S
+monWT3$SW<-monWT2$SW+monWT2$C.SW+monWT2$A.SW
+monWT3$W<-monWT2$W+monWT2$C.W+monWT2$A.W
+monWT3$NW<-monWT2$NW+monWT2$C.NW+monWT2$A.NW
+monWT3$N<-monWT2$N+monWT2$C.N+monWT2$A.N
+
+# apply method 2 again to the new classification:
+p1<-monWT3[period==1]/n1
+p2<-monWT3[period==2]/n2
+p<-(monWT3[period==1]+monWT3[period==2])/(n1+n2)
+z2<-abs(p1-p2)/((p1*(1-p1)/n1)+(p2*(1-p2)/n2))^0.5
+z2<-as.matrix(z2)
+z2<-pnorm(z2,lower.tail=F) # computes the p-values given z2
+temp2<-monWT3[period==1]>=1
+ss2<-which(temp2==F)
+z2[ss2]<-NA
+z2<-z2[,-c(1,2)] # drops the first two columns
+z2
+
+# p2-p1 shows whether the frequency increases or decreases from the first to the second period
+segno<-(p2-p1)[,-c(1,2),with=F]
+ss<-which(segno<0)
+z2[ss]<-z2[ss]*(-1)
+z2<-round(z2,7)
+z2
+
+# to replace numbers with colours:
+df<-data.frame(x=c("A","B","C"),colour=c(0.3,0.6,0.9))
+breaks <- c(0.4, 0.7) # you can add more cut points here
+cols <- brewer.pal(length(breaks) + 1, "Greens")
+df$colour <- as.character(cut(df$colour, c(-Inf, breaks, Inf), labels=cols))
+
+
+#### WT POSTER MAPS ####################################################################
+
+# MAP 1: for each classification and each weather type, computes for every month the mean % of days with that WT over the whole period of matrix A:
+par(mfrow=c(4,7));par(oma=c(2,2,2,2)); par(mar=c(1.5,1,1,1)) # oma controls the outer margin around all panels, mar the inner margins between panels
+for(wt in 1:max.nWT){
+# draws the bar plot without the y axis: (the second mgp option sets the distance of the numbers from the axes) # xlab="months",border="darkblue"
+gbarre<-barplot(AN.mean[,wt,],beside=TRUE,ylim=c(0,30),col = c("blue", "red"),names.arg=c("J","F","M","A","M","J","J","A","S","O","N","D"),cex.names=0.6,xpd=F,axes=FALSE,mgp=c(3,0,0))
+axis(2,cex.axis=0.6,tcl=-0.2) # adds the Y axis (the number 2 creates an axis on the left) and resizes the characters and the tick marks (default tcl=-0.5; the minus sign means the tick points left)
+box() # draws a box around the plot (par(lty="o") does not work)
+arrows(-1.5+3*(1:12),AN.mean[1,wt,]-AN.msd[1,wt,],-1.5+3*(1:12),AN.mean[1,wt,]+AN.msd[1,wt,],length=0.01,angle=90,code=3,col="lightseagreen") # error bars of the first series
+arrows(-0.5+3*(1:12),AN.mean[2,wt,]-AN.msd[2,wt,],-0.5+3*(1:12),AN.mean[2,wt,]+AN.msd[2,wt,],length=0.01,angle=90,code=3,col="coral") # error bars of the second series
+title(WTs.names[[1]][wt],line=0.3)
+}
+lista.classif<-paste(nome.classe,collapse=" and the ")
+title(paste("Comparison between the mean monthly percentage of WTs using the ",lista.classif," database for ",begin.analisi,"-",end.analisi," period.",sep=""),outer=TRUE) # outer=TRUE draws the title outside the panel area
outlier serve per disegnare il titolo fuori dall'area dei pannelli +plot(1:12,1:12,type="n",axes=F);plot(1:12,1:12,type="n",axes=F) # mette due plot in bianco per arrivare in fondo a destra dello schermo +legend(-2.5,10,nome.classe, cex=1.5, bty="n", fill=c("blue", "red"),x.intersp=0.2) # i primi due numeri sono le coordinate della legenda, x.intersp e' la distanza tra i colori e il testo della legenda + +# MAPPA 2: come prima, ma visualizza solo la serie temporale di uno o piu' mesi + media mobile di 5 anni: +mesinv<-c(10,11,12,1,2,3) # scegli 6 mesi da usare per definire la serie invernale (1 e' gennaio, 12 dicembre) SOLO per la Mappa 2 +windows() # per mettere la mappa in una nuova finestra +AN.inv<-AN.inv.medmob<-list() +for(cla in 2:n.classi){ # tolgo temporaneamente la cla num.1 perche'comincia dopo il 1850 +AN.inv[[cla]]<-AN.inv.medmob[[cla]]<-list() +for(wt in 1:max.nWT)AN.inv[[cla]][[wt]]<-AN[[cla]][[mesinv[1]]][,2+wt]+AN[[cla]][[mesinv[2]]][,2+wt]+AN[[cla]][[mesinv[3]]][,2+wt]+AN[[cla]][[mesinv[4]]][,2+wt]+AN[[cla]][[mesinv[5]]][,2+wt]+AN[[cla]][[mesinv[6]]][,2+wt] +for(wt in 1:max.nWT)AN.inv.medmob[[cla]][[wt]]<-filter(AN.inv[[cla]][[wt]],rep(0.2,5)) # 0.2 sono i coefficienti della media mobile di 5 anni +} +cla=cla.rif +par(mfrow=c(4,7));par(oma=c(2,2,2,2)); par(mar=c(2.5,1,1,1)) # oma controlla il bordo esterno a tutti i grafico, mar i bordi interni tra grafici +for(wt in 1:max.nWT){ +plot(begin.analisi:end.analisi,AN.inv[[cla]][[wt]],type="l",cex=0.4,col="blue",ylim=c(0,max(AN.inv[[cla]][[wt]])),xlab="", ylab="") +par(new=TRUE) +plot(begin.analisi:end.analisi,AN.inv.medmob[[cla]][[wt]],type="l",cex=0.4,col="red",ylim=c(0,max(AN.inv[[cla]][[wt]])),xlab="", ylab="",lwd=2) +title(WTs.names[[1]][wt],line=0.3) +} +title(paste("Long-term variability of observed frequency (%) in Winter for ",nome.classe[cla]," database and ",begin.analisi,"-",end.analisi," period.",sep=""),outer=TRUE) # outlier serve per disegnare il titolo fuori dall'area dei pannelli + +# MAPPA 3: come mappa 2, ma mostra solo la media mobile e per tutte le classificazioni sovrapposte: +windows() # per mettere la mappa in una nuova finestra +par(mfrow=c(4,7));par(oma=c(2,2,2,2)); par(mar=c(1.5,1,1,1)) # oma controlla il bordo esterno a tutti i grafico, mar i bordi interni tra grafici +for(wt in 1:max.nWT){ +massimo<-max(AN.inv.medmob[[1]][[wt]],AN.inv.medmob[[2]][[wt]],na.rm=T) +plot(begin.analisi:end.analisi,AN.inv.medmob[[1]][[wt]],type="l",cex=0.4,col="red",ylim=c(0,massimo),xlab="", ylab="",lwd=2) +par(new=TRUE) +plot(begin.analisi:end.analisi,AN.inv.medmob[[2]][[wt]],type="l",cex=0.4,col="orange",ylim=c(0,massimo),xlab="", ylab="",lwd=2) +title(WTs.names[[1]][wt],line=0.3) +} +title(paste("5-days mobile mean of long-term variability of observed frequency (%) in Winter for all database and ",begin.analisi,"-",end.analisi," period.",sep=""),outer=TRUE) # outlier serve per disegnare il titolo fuori dall'area dei pannelli +plot(1:12,1:12,type="n",axes=F);plot(1:12,1:12,type="n",axes=F) # mette due plot in bianco per arrivare in fondo a destra dello schermo +legend(-2.5,10,nome.classe, cex=1.5, bty="n", fill=c("red", "orange"),x.intersp=0.2) # i primi due numeri sono le coordinate della legenda, x.intersp e' la distanza tra i colori e il testo della legenda + +# MAPPA 4: come la mappa 1, ma visualizza solo i tipi di tempo della classificazione EMULATE +par(mfrow=c(4,7));par(oma=c(2,2,2,2)); par(mar=c(1.5,1,1,1)) # oma controlla il bordo esterno a tutti i grafico, mar i bordi interni tra grafici +for(wt in 
1:max.nWT){ +# disegna il grafico a barre ma senza l'asse y: (# la seconda opzione di mgp derermina la distanza dei numeri dagli assi) # xlab="months",border="darkblue" +gbarre<-barplot(AN.mean[2,wt,],beside=TRUE,ylim=c(0,30),col = c("grey40"),names.arg=c("J","F","M","A","M","J","J","A","S","O","N","D"),cex.names=0.6,xpd=F,axes=FALSE,mgp=c(3,0,0)) +axis(2,cex.axis=0.6,tcl=-0.2) # aggiunge l'asse Y (il numero 2 crea un asse a sinistra) e ridimensiona la grandezza dei caratteri e delle tacche (valore default per tcl= -0.5, il meno indica che la tacca e' rivolta a sinistra) +box() # disegna un box intorno al grafico (par(lty="o") non funziona) +title(WTs.names[[1]][wt],line=0.3) +} +title(paste("Mean monthly percentage of WTs using the ",nome.classe[2]," database for ",begin.analisi,"-",end.analisi," period.",sep=""),outer=TRUE) # outlier serve per disegnare il titolo fuori dall'area dei pannelli +plot(1:12,1:12,type="n",axes=F);plot(1:12,1:12,type="n",axes=F) # mette due plot in bianco per arrivare in fondo a destra dello schermo + +# MAPPA 5: come la mappa 2, ma visualizza la serie temporale per OGNUNO dei mesi in int.mesi e con media mobile di 21 anni: +AN.mese<-AN.mese.medmob<-list() +cla=cla.rif +for(mese in int.mesi){ +windows() # per mettere la mappa in una nuova finestra +AN.mese[[cla]]<-AN.mese.medmob[[cla]]<-list() +for(wt in 1:max.nWT)AN.mese[[cla]][[wt]]<-AN[[cla]][[mesinv[1]]][,2+wt] +for(wt in 1:max.nWT)AN.mese.medmob[[cla]][[wt]]<-filter(AN.mese[[cla]][[wt]],rep(1/21,21)) # 0.2 sono i coefficienti della media mobile di 5 anni + +par(mfrow=c(4,7));par(oma=c(2,2,2,2)); par(mar=c(2.5,1,1,1)) # oma controlla il bordo esterno a tutti i grafico, mar i bordi interni tra grafici +for(wt in 1:max.nWT){ +plot(begin.analisi:end.analisi,AN.mese[[cla]][[wt]],type="l",cex=0.4,col="blue",ylim=c(0,max(AN.mese[[cla]][[wt]])),xlab="", ylab="") +par(new=TRUE) +plot(begin.analisi:end.analisi,AN.mese.medmob[[cla]][[wt]],type="l",cex=0.4,col="red",ylim=c(0,max(AN.mese[[cla]][[wt]])),xlab="", ylab="",lwd=2) +title(WTs.names[[1]][wt],line=0.3) +} +title(paste("Long-term variability of observed frequency (%) in ",nome.mese[mese]," for ",nome.classe[cla]," database from ",begin.analisi," to ",end.analisi,sep=""),outer=TRUE) # outlier serve per disegnare il titolo fuori dall'area dei pannelli +} + +# MAPPA 6: come la mappa 5, ma visualizza la serie temporale SOVRAPPONENDO ogni mese in int.mesi2 di uno o piu' mesi e con filtro gaussiano: +AN.mese<-AN.mese.medmob<-list() +cla=cla.rif +int.mesi2<-c(10,11,12) # puoi cambiarlo come vuoi es: int.mesi2<-int.mesi o int.mesi2<-c(10,3) o int.mesi2<-4 +par(mfrow=c(4,5));par(oma=c(2,2,2,2)); par(mar=c(2.5,1.5,1.2,1)) # oma controlla il bordo esterno a tutti i grafici, mar i bordi interni tra grafici # ordine parametri: basso/sinistra/alto/destra +for(wt in 1:20 ){ #max.nWT){ + AN.mese[[cla]]<-AN.mese.medmob[[cla]]<-list() + ymin<-ymax<-c() + for(mese in int.mesi2) {#int.mesi){ + AN.mese[[cla]][[wt]]<-AN[[cla]][[mese]][,2+wt] + lll<-length(AN.mese[[cla]][[wt]]) + ymin[mese]<-min(ksmooth(1:lll,AN[[cla]][[mese]][,2+wt], "normal",bandwidth=years.filtro)$y) + ymax[mese]<-max(ksmooth(1:lll,AN[[cla]][[mese]][,2+wt], "normal",bandwidth=years.filtro)$y) + } + ymin2<-min(ymin,na.rm=T);ymax2<-max(ymax,na.rm=T) + + plot(begin.analisi:end.analisi,rep(0,end.analisi-begin.analisi+1),type="n",cex=0.4,col="red",xlim=c(begin.analisi,end.analisi),ylim=c(ymin2,ymax2),xlab="", ylab="",lwd=2) + title(WTs.names[[1]][wt],line=0.3) + + for(mese in int.mesi2){ #int.mesi){ + 
AN.mese[[cla]][[wt]]<-AN[[cla]][[mese]][,2+wt] + lll<-length(AN.mese[[cla]][[wt]]) + AN.mese.medmob[[cla]][[wt]]<-ksmooth(1:lll,AN.mese[[cla]][[wt]], "normal",bandwidth=years.filtro,n.points=lll) # filtro gaussiano!!! Sostituisce filter(AN.mese[[cla]][[wt]],rep(1/51,51),method="convolution") + lines(begin.analisi:end.analisi,AN.mese.medmob[[cla]][[wt]]$y,cex=0.4,col=month.color2[mese],xlab="", ylab="",lwd=.5) + #lines(begin.analisi:end.analisi,ksmooth(1:lll,AN.mese[[cla]][[wt]], "normal",bandwidth=5,n.points=lll)$y,cex=0.4,col=month.color2[mese],xlab="", ylab="",lwd=.1) + } +} +title(paste("Long-term variability of WTs frequency for ",nome.classe[cla]," database from ",begin.analisi," to ",end.analisi,sep=""),outer=TRUE) # outlier serve per disegnare il titolo fuori dall'area dei pannelli +plot(begin.analisi:end.analisi,rep(0,end.analisi-begin.analisi+1),type="n",axes=F,xlab="", ylab="") # lascia un riquadro (panel) vuoto!!! +plot(begin.analisi:end.analisi,rep(0,end.analisi-begin.analisi+1),type="n",xlim=c(begin.analisi,end.analisi),ylim=c(0,10),axes=F,xlab="", ylab="") +legend(c(begin.analisi,end.analisi),c(10,0),nome.mese[int.mesi],col=month.color2[int.mesi],lwd=.2,bty="n",seg.len=1) # bty="n" toglie il frame attorno alla legenda +} + +if(correl==TRUE){ ############################### CALCOLO CORRELAZIONI mensili tra serie di prec. #################################################### +# servira' per scegliere le stazioni per fare la LRK di tipo avanzato +# si calcola la correlazione tra i logaritmi delle serie temporali mensili per eliminare l'influenza degli outliers. + +# toglie gli anni di dati delle stazioni mensili di prec.fuori dal periodo considerato per il calcolo delle correlazioni: +dati2<-subset(dati,dati[[2]]>=start.regr & dati[[2]]<=end.regr) +log.dati2<-log(1+dati2[,3:14]) # trasforma il database nel suo logaritmo naturale per poter calcolare poi le correlazioni (aggiunge 1 per non avere mai valori negativi) + +# crea la matrice con il logoritmo delle correlazioni tra coppie di serie per ogni mese: (aggiungendo uno per non avere valori negativi del logaritmo) +log.cor<-array(NA,c(n.staz,n.staz,12),dimnames=list(listaz,listaz,nome.mese)) + +barraP<-winProgressBar("Calculating correlations","Please wait...",0,n.staz,0) +for(st1 in 1:(n.staz-1)){ + rows1<-(1:n.years)+(st1-1)*n.years + for(st2 in (st1+1):n.staz){ + rows2<-(1:n.years)+(st2-1)*n.years + for(mese in 1:12){ + log.cor[st1,st2,mese]<-.Internal(cor(log.dati2[rows1,mese],log.dati2[rows2,mese],5L,FALSE)) + } + } + setWinProgressBar(barraP,st1,label=paste(st1,"/",n.staz)) +};close(barraP) + +# controlla tu stesso che chiamando direttamente la funzione .Internal la velocita' aumenta di 70 volte +# rispetto al semplice cor(x,y,use="na.or.complete") [l'ultima opzione, FALSE, serve per impostare il methodo di pearson]: +# system.time(for(i in 1:100000).Internal(cor(x,y,5L,FALSE))) #[5L e' l'na.method scelto (il quinto)] +# (la opzione 5L corrsisponde a use="na.or.complete" restituisce NA solo se non c'e' nemmeno una coppia di dati senza NA in tutte e due le serie) + + for(st1 in 1:n.staz)log.cor[st1,st1,]<-1 # imposta ad 1 le correlazioni tra la stessa serie + + for(st1 in 2:n.staz){ # dato che la matrice e' simmetrica, calcola i valori simmetrici copiando quelli dell'altra meta' della matrice + for(st2 in 1:(st1-1)){ + for(mese in 1:12)log.cor[st1,st2,mese]=log.cor[st2,st1,mese] + } + } + + save.image(paste("C:/nicola/precipitaciones/Local_Regression_Kriging/LRK_v1.RData",sep="")) + 
write.table(log.cor,file="C:/nicola/precipitaciones/Local_Regerssion_Kriging/correlaciones_mensuales.txt",row.names=FALSE,col.names=FALSE,quote=FALSE) +} # chiude l'if su correl + +################### CARICA CONTORNO PENISOLA IBERICA E PROIETTA LE SERIE PREC IN COORD.GEOGR O UTM30 ##############################################################################3 +coordinates(infofile) <- c('LONGITUDE','LATITUDE') # specifica le colonne delle coordinate di infofile +bbox(infofile) #leggere i valori min e max delle coordinate +#sp::plot(infofile,cex=0.3,col="light blue") +#sp::plot(IpPoly,add=T) # solo per vedere dove sono le stazioni +proj4string(infofile) <- coord.system.geograf # +ellps=clrk66") # specifica il sistema di riferimento delle colonne delle coordinate di infofile (dev'essere con i gradi decimali perche'le due colonne long e lat sono espresse cosi') + +if(proyected==TRUE)infofile<-spTransform(infofile, coord.system.proyected) # proietta le stazioni in UTM30 +#summary(my.grid.values) #plot(infofile) # per vedere la forma della griglia proiettata! + +# importa il bordo della IP con frontiera tra Spagna e Portogallo: +IpPoly<- readShapePoly(IpPoly.geograf) +proj4string(IpPoly) <- coord.system.geograf +if(proyected==TRUE)IpPoly<-spTransform(IpPoly, coord.system.proyected) # proietta il perimetro della penisola iberica a UTM30 +#sp::plot(IpPoly); summary(IpPoly); IpPoly$bbox + +# crea un generico grid a gradi dec che si sovrapponga a tutta la Penisola Iberica: +if(proyected==FALSE){coord.system<-coord.system.geograf}else{coord.system<-coord.system.proyected} +if(proyected==FALSE){pixel.size<-pixel.size.geograf}else{pixel.size<-pixel.size.proyected*1000} # *1000 per passare da km a metri +if(proyected==FALSE){xmin.box<-xmin.box.geograf}else{xmin.box<-xmin.box.proyected} +if(proyected==FALSE){ymin.box<-ymin.box.geograf}else{ymin.box<-ymin.box.proyected} +if(proyected==FALSE){num.x.pixels<-num.x.pixels.geograf}else{num.x.pixels<-num.x.pixels.proyected} +if(proyected==FALSE){num.y.pixels<-num.y.pixels.geograf}else{num.y.pixels<-num.y.pixels.proyected} +if(proyected==FALSE){resol.unit<-"DecDegr"}else{resol.unit<-"Km"} +gt = GridTopology(cellcentre.offset = c(xmin.box,ymin.box), cellsize = c(pixel.size,pixel.size), cells.dim = c(num.x.pixels,num.y.pixels)) +grd = SpatialGrid(gt) +n.pixels<-grd@grid@cells.dim[1]*grd@grid@cells.dim[2] # o equivalentemente: num.x.pixels * num.y.pixels +proj4string(grd) <- coord.system +#summary(grd);sp::plot(IpPoly);sp::plot(grd,add=T);sp::plot(infofile[2414,],add=T) # visualizza il grid sovrapposto alla IP e la stazione di Zaragoza! 
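+
+# Optional sketch, assuming the grid should just cover the polygon: instead of hard-coding
+# the number of pixels, cells.dim could be derived from the bounding box, e.g.:
+# bb <- bbox(IpPoly)                                    # 2x2 matrix: rows = x,y; cols = min,max
+# num.x.pixels <- ceiling((bb[1,2]-bb[1,1])/pixel.size) # columns needed at this resolution
+# num.y.pixels <- ceiling((bb[2,2]-bb[2,1])/pixel.size) # rows needed at this resolution
+# gt <- GridTopology(cellcentre.offset=c(bb[1,1],bb[2,1]), cellsize=c(pixel.size,pixel.size), cells.dim=c(num.x.pixels,num.y.pixels))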
+ +#importa il bordo della sola Spagna (alternativamente si puo' anche importare solo quello in coord.geografiche e poi convertirlo in UTM30 come per IpPoly): +if(proyected==FALSE){SpainPoly<-readShapePoly(SpainPoly.geograf)}else{SpainPoly<-readShapePoly(SpainPoly.proyected)} +#SpainPoly@bbox;summary(SpainPoly);plot(SpainPoly);spplot(SpainPoly) + +my.points<-list('sp.points',infofile,pch=1,cex=0.2,col="black") +my.polygon<-list('sp.polygons',IpPoly,fill='transparent',first=F) # first=FALSE gli dice di NON disegnare il poligono per primo # altre opzioni di list: fill=0, lwd=3, lty=2 +my.layout<-list(my.polygon) +my.layout2<-list(my.polygon,my.points) + +#importa le province spagnole non proiettate: +#SpainProvPoly <- readShapePoly('C:/my_R_scripts/manuales/Introduction to R/cursoR_AEMET_2010/ejercicios/borders_espana/borders_sin_proyeccion.shp') +#SpainProvPoly <- as(readShapePoly('c:/cursoR/ejercicios/province/pol_prov'), 'SpatialPolygonsDataFrame') +#plot(SpainProvPoly) #disegna il grafico delle province + +if(int.method!="OK"){ # importa l'MDE: + if(proyected==FALSE){mde<-mde.geograf}else{mde<-mde.proyected} + elevGrid <- readAsciiGrid(mde) + elevGrid@bbox;summary(elevGrid);image(elevGrid);spplot(elevGrid) + + # verifica che l'mde e i dati si sovrappongano bene alla Spagna (se non va bene cambia i valori di xllcorner e yllcorn): + #sp::plot(SpainPoly);sp::plot(elevGrid, col='gray',cex=0.4, add=T);plot(infofile, col='red',cex=0.5, add=T) +} + +if(regr.prec.vs.alt==TRUE){ ############ REGRESSION PREC VS ALTEZZA ################################################################## + + st.min=100 # numero minimo di stazioni per fare la regressione + #corr.min=0.5 # soglia minima di corr.mensile per considerare una stazione valida per la interpolazione + + for(mese in 1:12){} +} + +if(interpol!="NO"){###################################### MONTHLY PRECIPITATION GRID ############################################################################# + # cerca se ci sono 2 o + serie che hanno le stesse coordinate; in questi casi, le sposta di un metro piu' a nord in modo che si possa fare il kriging: + # alternativamente, la funzione zerodist(infofile) restituisce il numero di fila delle stazioni con la stessa long e lat + if(dim(zerodist(infofile))[1]>0)print(paste("***** Trovate",dim(zerodist(infofile))[1],"serie coincidenti che sono state spostate un poco piu' a nord *****")) + lonlat<-infofile@coords # stringa con la long+lat di ogni stazione # equivalentemente: lonlat<-paste(coordinates(infofile)[,1],coordinates(infofile)[,2]) + nlonlat<-dim(lonlat)[1] + if(proyected==FALSE){small.shift<-0.00001}else{small.shift<-1} + #n.coinc<-0 + for(st in 1:nlonlat){ + ss<-which(lonlat[,1]==lonlat[st,1] & lonlat[,2]==lonlat[st,2]) # vettore con tutte le righe della tabla che hanno la stessa lon e lat della riga staz (dovrebbe esserci 1 sola fila) + nss<-length(ss) # se vale 1 vuol dire che va bene + if(nss>1){ + #print(paste(st,listaz[st],ss[1],ss[2])) # riga per il debug; normalmente ci sono 2 stazioni nella stessa posizione + for(k in 2:nss)infofile@coords[ss[k],2]=infofile@coords[ss[k],2]+small.shift*k-1 # aumenta di k centomillesimi di grado o di k metri la latitudine (esclusa la prima stazione del gruppo) + #n.coinc<-n.coinc+1 #print(n.coinc) + } + } + + tot.int<-end.int-start.int+1 + period.int<-start.int:end.int # sostituisce il vecchio period<-yearB:yearE + + # toglie gli anni di dati delle stazioni mensili di prec.fuori dal periodo considerato per fare l'interpolazione: + 
dati.int<-subset(dati,dati[[2]]>=start.int & dati[[2]]<=end.int) + + # se stiamo interpolando la climatologia crea prima una matrice con la climatologia, ovvero il valor medio della prec. calcolato su tutti gli anni del periodo impostato e per ogni mese: + if(interpol=="CLI"){ + promedio<-desv<-matrix(NA,n.staz,12) + for(st in 1:n.staz){for(mese in int.mesi)promedio[st,mese]=mean(dati.int[((st-1)*tot.int+1):(st*tot.int),2+mese],na.rm=T)[[1]]} + for(st in 1:n.staz){for(mese in int.mesi)desv[st,mese]=sd(dati.int[((st-1)*tot.int+1):(st*tot.int),2+mese],na.rm=T)[[1]]} + + end2.int<-start.int # cosi' elimina il prossimo ciclo for su y + }else{end2.int<-end.int} + + # interpola per ogni anno (ovvero crea un raster per ogni anno), a meno che non sia impostato interpol="CLI", + # nel qual caso interpola solo il valore medio su tutto il periodo da start.int a end.int (cioe' la climatologia) + if(proyected==TRUE)dmax2<-dmax*1000 # *1000 per passare da km a metri + int.dati<-c() + for(y in start.int:end2.int){ + # carica i dati dei valori mensili di prec.in infofile in base all'anno considerato (o direttamente tutta la climatologia se interpol="CLI"): + for(mese in 1:12){ + # nel caso della climatologia inserisci qui la variabile da mappare: la media (promedio[,mese]) o la varianza (desv[,mese]): + if(interpol=="CLI"){infofile[[4+mese]]<-promedio[,mese]} else {st<-1:n.staz;infofile[[4+mese]]<-dati.int[((st-1)*tot.int+1)+y-start.int,2+mese]} + names(infofile)[4+mese]=nome.mese.short[mese] + } + + # crea un grid vuoto (prec.grid) dove mettere i risultati dell'interpolazione e un'altro (void.grid) che rimanga sempre vuoto da usare come plantilla invece di infofile: + Prec.grid<-void.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.pixels)), proj4string = coord.system) + + # interpola ogni mese dell'anno (anche se la regressione non si fa su tutti i mesi), + # mette il risultato in Prec@grid ed arrotonda la precipitazione prevista alla prima cifra decimale per non occupare troppo spazio se poi la si salva su un .txt (tanto l'errore introdotto arrotondando e' trascurabile):: + formul<-c(Jan~1,Feb~1,Mar~1,Apr~1,May~1,Jun~1,Jul~1,Aug~1,Sep~1,Oct~1,Nov~1,Dec~1) + infofile.sinNA<-infofile + for(mese in 1:12){ + # occhio che purtroppo la funzione variogram non accetta NA come dato di input, se ti dice "Error: dimensions do not match", c'e' qualche NA. + # percio'per fare il variogramma gli NA vengono sostituiti con il valore medio mensile per la serie con NA, anche se non e' corretto, ma tanto servono solo per calcolare il variogramma + # se stiamo calcolando la climatologia invece gli NA non sono mai presenti perche' il promedio e'stato fatto con l'opzione na.rm=T + ss<-which(is.na(infofile[[4+mese]])) + if(length(ss)>0){ + for(s in ss)infofile.sinNA[[4+mese]][s]<-mean(dati.int[((s-1)*tot.int+1):(s*tot.int),2+mese],na.rm=T)[[1]] + } + v <- variogram(formul[[mese]], infofile.sinNA, proyected=proyected,cutoff=dmax2) + #plot(v,type='b') # per vedere i punti del semivariogramma, type=b traccia anche una linea spezzata tra i punti, ma non e' il fit + #v.fit <- fit.variogram(v, model=vgm(psill=8000, model='Gau', range=200000, nugget=500)) # calcola automaticamente tutti i parametri della curva di fit, basta dargli dei valori iniziali approssimativi!!!!! 
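+    # Self-contained illustration of the same fitting strategy on the meuse data bundled
+    # with sp/gstat (rough initial values read off the sample variogram, then refined by
+    # fit.variogram); a sketch for reference only, not part of this script's data flow:
+    #   library(gstat); library(sp); data(meuse); coordinates(meuse) <- ~x+y
+    #   v.ex <- variogram(log(zinc)~1, meuse)
+    #   v.ex.fit <- fit.variogram(v.ex, vgm(psill=max(v.ex$gamma)-v.ex$gamma[1], model="Sph", range=max(v.ex$dist), nugget=v.ex$gamma[1]))
+    #   plot(v.ex, v.ex.fit) # sample variogram + fitted spherical model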
+ temp.nugget<-v$gamma[1] # valore inziale del nugget per fare una prova, dopodiche l'algoritmo di fit.variogram aggiusta i valori + temp.sill<-last(v$gamma) # come valori iniziali ho messo quelli che si possono ricavare 'ad occhio' dal variogramma sperimentale v + temp.range<-last(v$dist) + v.fit <- fit.variogram(v, model=vgm(psill=temp.sill-temp.nugget, model='Sph', range=temp.range, nugget=temp.nugget),fit.ranges=FALSE) # ho dovuto mettere fit.ranges=FALSE perche' a volte fitta dei range negativi che danno errore fermando i calcoli + print(plot(v,v.fit)) # per vedere il semivariogramma + la curva di fit + + #v.fit #per vedere i parametri risultato del fit #plot(v,v.fit) # per vedere il semivariogramma + la curva di fit #b <- mean(DistData[[i]][[j+1]]) # solo per il simple kriging + print(paste("Anno:",y,"; Mese:",mese)) # cosi' se avvisa che il variogram e' un singular model sai a che mese e anno si riferisce + #Prec.grid@data[[mese]]<-round(krige(formula=formul[[mese]], locations=infofile.sinNA, newdata=grd, model=v.fit,nmax=int.nmax)@data$var1.pred,1) # se desse problemi a fare il kriging prova a vedere se dipende da una cattiva curva del fit, prova magari a impostare un range del fit piu' corto. + if(loocv==FALSE){ + # problema: se ci sono NA in location non puo'interpolare. GLi NA ci possono essere solo in newdata. Percio'dovresti togliere + # da infofile le serie con NA, ma dato che per mesi diversi le serie con NA di solito cambiano, dovresti creare un infofile diverso + # per ogni mese! Per il momento ho sostituito la posto di infofile infofile.sinNA che al posto degli NA ha il promedio dei valori + # della serie, ma non ? una soluzione corretta! + Prec.grid@data[[nome.mese.short[mese]]]<-round(krige(formula=formul[[mese]], locations=infofile.sinNA, newdata=grd, model=v.fit,nmax=int.nmax)@data$var1.pred,1) # come prima ma permette di considerare anche input con osservatori senza dati (NA) perche' li esclude dall'interpolazione!!! 
(con na.omit) + } else { # fa la leave-one-out cross-validation (loocv): + Prec.loocv<-infofile;Prec.loocv@data[[nome.mese.short[mese]]]<-NA + for(st in 1:n.staz){ + temp<-krige(formula=formul[[mese]], locations=infofile,na.action=na.omit, newdata=infofile[st,], model=v.fit,nmax=int.nmax) #temp<-krige.cv(formula=formul[[mese]], locations=infofile, model=v.fit,nmax=int.nmax) # si puo' usare anche anche krige.cv ma 1) e'piu'lenta di un 20% e 2) non si puo' inserire l'opzione per i missing data + #print(st) + Prec.loocv@data[[nome.mese.short[mese]]][st]<-round(temp@data$var1.pred,1) # su var1.var c'e' la varianza dell'errore, ovvero il RMSE al quadrato dei residuali + } # chiude il for su n.staz + } # chiude l'if sul loocv + } + + #if(loocv==FALSE)names(Prec.grid@data)<-nome.mese.short + + # seleziona i pixel di mare con overlay e li mette a NA in modo che non li disegni nella mappa finale: + sea.pixels<-which(is.na(overlay(grd,IpPoly))==T) + land.pixels<-(1:n.pixels)[-sea.pixels] + for(mese in 1:12)Prec.grid@data[[nome.mese.short[mese]]][sea.pixels]=NA + + # matrice della forma num.pixel/anno/gen/feb/.../dic che memorizza i dati di prec per ogni singolo anno della interpolazione (o solo la climatologia): + int.dati<-rbind(int.dati,cbind(1:n.pixels,y,Prec.grid@data)) # int.dati e' la unione di tanti colonne di dati di Prec.grid, un PRec.grid per ogni anno, cioe' e' del tipo pixel/anno/12 mesi + if(interpol=="SI")my.grid.name=paste("Monthly Precipitation Grid for year ",y,sep="") + if(interpol=="CLI")my.grid.name=paste("Monthly Precipitation Climatology for period ",start.int,"-",end.int,sep="") + + # visualizza e/o salva in un .pdf le 12 mappe mensili appena interpolate: + if (start.int-end2.int>0){second.part=paste("-",end.int,".pdf",sep="")} else { second.part=".pdf"} + my.prec.levels<- c(seq(0,50,1),seq(52.5,150,2.5),seq(155,200,5),seq(210,290,20),300) + my.prec.levels.in.leyend<-c(0,25,50,75,100,150,200,300,400) + my.prec.levels2<- c(0,1,5,10,20,30,50,70,100,150,200,300,500) # leyenda uguale a quella di Ricardo + my.prec.levels3<- c(-100,0,1000) # per vedere se ci sono pixel interpolati minori di 0 + my.period<-c(12,1,2,3,4,5,6,7,8,9,10,11) # seleziona i mesi da mappare + my.names<-nome.mese.esp[my.period] + Prec.grid2<-Prec.grid + for(mese in 1:12)Prec.grid2@data[nome.mese.short[mese]][which(Prec.grid@data[nome.mese.short[mese]]>300),]<-300 + my.map<-spplot(Prec.grid2,zcol=nome.mese.short[my.period],at=my.prec.levels,col.regions=rev(jet.colors(10000)),main=my.grid.name,par.settings = standard.theme(color = FALSE), sp.layout=my.layout,as.table=TRUE,names.attr=my.names,colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend))) + + # print(my.map) # per visualizzare la mappa interpolata solo sullo schermo o (fila sotto) per inviarla a un immagine su disco: + # jpeg("C:/nicola/precipitaciones/weather_types/mapas/6) composiciones_EMULATE_y_IP02/Climatology_Mopredas_1950-2003_same_coord_system_as_Ip02.jpg",width=590,height=616,quality=100);print(my.map);dev.off() # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. + + ### BOXPLOT della climatologia mensile: + # Prec.grid2@data<-Prec.grid2@data[,c(2:13)] # toglie la prima colonna senza dati + # #ss<-which(is.na(Prec.grid2@data[[1]])) # toglie gli NA + # #Prec.grid2@data<-Prec.grid2@data[-ss,] + # fixInNamespace(boxplot.default, "graphics") # dopodiche devi editare la riga con boxplot.stats sostituendola con myboxplot.stats. 
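+ # A gentler alternative sketch to patching boxplot.default with fixInNamespace (an
+ # assumption, not the method used here): compute the whisker positions yourself and
+ # draw with bxp():
+ #  bstats <- boxplot(Prec.grid2@data, plot=FALSE)
+ #  bstats$stats[1,] <- apply(Prec.grid2@data, 2, quantile, probs=0.05, na.rm=TRUE) # lower whisker = 5th percentile
+ #  bstats$stats[5,] <- apply(Prec.grid2@data, 2, quantile, probs=0.95, na.rm=TRUE) # upper whisker = 95th percentile
+ #  bxp(bstats, boxwex=.5, outline=FALSE, ylab="mm")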
+ # This way the whiskers show the 5th and 95th percentiles instead of 1.5*IQR; to show only the minimum and maximum values set range=0 in the boxplot call.
+ # boxplot(Prec.grid2@data,boxwex=.5,col="cyan3",outline=F,ylab="mm") # boxwex is the width of each box; outline=F removes the outlier points.
+ # #means<-colMeans(Prec.grid2@data) # to draw the mean as well
+ # #points(means,col="red",pch="+",cex=1)
+
+ ### zoom on the high precipitation values:
+ # my.prec.levels<- c(seq(80,300,10),500)
+ # my.prec.levels.in.leyend<-c(80,100,150,200,300,500)
+ # my.map<-spplot(Prec.grid,zcol=nome.mese.short,at=my.prec.levels,col.regions=rev(jet.colors(10000)),main=my.grid.name,
+ #        sp.layout=my.layout,as.table=T,colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend)))
+ # print(my.map) # to show it on screen only
+ # jpeg("C:/nicola/precipitaciones/weather_types/mapas/6) composiciones_EMULATE_y_IP02/Climatology_Mopredas_1950-2003_scale2.jpg",width=590,height=616,quality=100);print(my.map);dev.off() # other possible formats to save the figure: pdf, bmp, jpeg, tiff.
+
+ ## saves a .pdf with the map of the station points coloured by their precipitation value:
+ # my.points.name="Monthly Rainfall Distribution" # graphical parameters
+ #pdf(paste(dir_root,"/Monthly Precipitation Stations ",start.int,second.part,sep=""))
+ #print(spplot(infofile,nome.mese.short[1],key.space = "right",cuts=my.prec.levels3,col.regions=rev(jet.colors2(1000)),main=my.points.name,sp.layout=my.layout,cex=0.4,as.table=T,fill=T))
+ #dev.off() # and how do you overlay the coloured points on the grid????
+
+ ## exports each monthly map to an ASCII file, with missing-data value -99 instead of -9999 and only 1 decimal digit to save space:
+ #if(interpol=="SI")for(mese in int.mesi)writeAsciiGrid(Prec.grid,paste(dir_root_kriging,"/",nome.mese[mese]," Prec.Grid ",pixel.size," ",resol.unit," ",y,".txt",sep=""),attr=nome.mese.short[mese],na.value=-99)
+ #if(interpol=="CLI")for(mese in int.mesi)writeAsciiGrid(Prec.grid,paste(dir_root_kriging,"/",nome.mese[mese]," Prec.Climatology ",pixel.size," ",resol.unit," ",start.int,"-",end.int,".txt",sep=""),attr=nome.mese.short[mese],na.value=-99)
+ } # closes the for over y
+
+ if(loocv==TRUE){ # if we are cross-validating the climatology, compute and map the residuals, then the MAE, RMSE, CV(MAE) and CV(RMSE) for all stations
+  prec.residual<-array(NA,c(n.staz,12),dimnames=list(1:n.staz,nome.mese))
+  # assumption: the residual is the loocv prediction minus the observed climatology
+  # ("Prec.oocv" in the original is presumably a typo for Prec.loocv):
+  for(mese in 1:12)prec.residual[,mese]<-Prec.loocv@data[[nome.mese.short[mese]]]-promedio[,mese]
+  my.main<-"Kriging Residuals"; # i.e. the WT climatology, WITH the constant
+  my.cuts<-c(seq(0,300,20),350,400,500)
+  my.colors<-jet.colors8
+  prec.residuals.mappa<-infofile;for(mese in 1:12)prec.residuals.mappa@data[[nome.mese.short[mese]]]<-NA
+  for(mese in 1:12)prec.residuals.mappa@data[[nome.mese.short[mese]]]<-as.vector(t(prec.residual[,mese])) # loads the data into the map object (taking the square root would give a colorkey with better-distributed colours)
+
+  my.map<-spplot(prec.residuals.mappa,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+        cuts=my.cuts,col.regions=rev(jet.colors(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],legendEntries=my.labels)
+  my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # to change the symbol size in the legend!!!
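+
+  # Sketch of the scores mentioned above (assumes interpol=="CLI", so that promedio holds
+  # the observed climatology; the formulas are the standard definitions, not taken from
+  # this script):
+  #  MAE  <- apply(abs(prec.residual), 2, mean, na.rm=TRUE)  # one value per month
+  #  RMSE <- sqrt(apply(prec.residual^2, 2, mean, na.rm=TRUE))
+  #  CV.MAE  <- MAE /colMeans(promedio, na.rm=TRUE)          # normalised by mean observed precipitation
+  #  CV.RMSE <- RMSE/colMeans(promedio, na.rm=TRUE)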
+ plot(my.map) + } # chiude l'if su loocv + + # riordina la matrice int.dati per la prima e seconda colonna (pixel e anno) in modo che sia nello stesso ordine della matrice dati: + int.dati<-int.dati[order(int.dati[,1],int.dati[,2]),] + + ## se vuoi utilizzare le interpolazioni annuali come input di my R scripts\Remo models\2) analysis_vX.r + ## devi cambiare il formato, da: year/mes in orizz. a: year/month/lat/long in orizzontale. Per farlo puoi usare il package "Reshape": + # if(interpol=="SI"){ + # #if(proyected==FALSE){mod.size<-1}else{mod.size<-1000} # *1000 per passare da km a metri + # #x.values<-sort(unique(SpatialPoints(grd)@coords[,1])) # elenco valori possibili di long dei centri dei pixel del grid + # #y.values<-sort(unique(SpatialPoints(grd)@coords[,2])) # elenco valori possibili di latitud dei centri dei pixel del grid + # xy.values<-SpatialPoints(grd)@coords # estrae le coordinate dei centri di ogni pixel del grid nello stesso ordine usato dal grid grd + # int.dati<-int.dati[order(int.dati[,2]),] # riordina int.dati in modo che sia ordinata per anni invece che per pixel + # grid.clim.for.cfr.Ensemble<-cbind(xy.values,int.dati[,2:14]) + # names(grid.clim.for.cfr.Ensemble)[1:3]<-c("LON","LAT","YEAR") + # library(reshape);library(reshape2) + # temp<-melt.data.frame(grid.clim.for.cfr.Ensemble,id=c("YEAR","LON","LAT"),variable_name="MONTH") + # temp$MONTH<-as.numeric(match(temp$MONTH,nome.mese.short)) # sostituisci la colonna con i mesi scritti in lettere con i mesi scritti in numeri per avere la stessa notazione usata nel confronto con i modelli Ensemble + # temp2<-dcast(temp,YEAR+MONTH+LAT~LON) + # grid.clim.for.cfr.Ensemble<-temp2;rm(temp);rm(temp2) + # #write.table(grid.clim.for.cfr.Ensemble,"C:/nicola/precipitaciones/Local_Regression_Kriging/precipitation 1961-2000_ordinary_kriging.txt",quote=FALSE,row.names=FALSE,col.names=FALSE) + # write.table(grid.clim.for.cfr.Ensemble,"C:/nicola/precipitaciones/Local_Regression_Kriging/precipitation 1950-2003_ordinary_kriging.txt",quote=FALSE,row.names=FALSE,col.names=FALSE) + # } # chiude l'if su interpol=="SI" + + #save.image("C:/nicola/precipitaciones/kriging_mensuales_Iberia/R_session_v2_Monthly_prec_Mopredas_con_coord_system_Ip02_o_sea_res_0.20.RData") + #save.image("C:/nicola/precipitaciones/Local_Regression_Kriging/LRK_v3_interpolacion_con_NA_y_loocv.RData") + +} # chiude l'if su interpol!="NO" + + ################################## PREPARA I DATI PRIMA DI APPLICARE IL MODELLO DI REGRESSIONE E/O LA PCA ############################################################### + tot.regr<-end.regr-start.regr+1 #numero di anni di durata della regressione + +if(interpol=="NO"){ + # toglie gli anni di dati delle stazioni mensili di prec.prima dell'inizio della regressione e dopo la fine della regressione: + # (il numero delle serie non cambia anche se ci possono essere serie senza dati nel periodo scelto) + dati.regr<-subset(dati,dati[[2]]>=start.regr & dati[[2]]<=end.regr) + n.staz.regr<-n.staz + matrix.regr<-cbind(rep(1:n.staz,each=tot.regr),as.matrix(dati.regr[,2:14])) #matrix.regr e' come dati.regr ma in forma matriciale e con i numeri delle serie invece dei codici sulla prima colonna +} else { + # se stiamo interpolando, i dati e le serie per la regressione non sono ricavati a partire dal dataframe dati + # ma a partire dai pixel dell'interpolazione (che sono gia' stati calcolati escludendo il periodo prima dell'inizio della regress e dopo la sua fine): + n.staz.regr<-n.pixels + # se vogliamo usare i valori mensili di Ip02 dobbiamo 
caricarli ora: + # int.dati<-read.table("C:/nicola/precipitaciones/weather_types/Ip02/Database_IBERIA_0.2/Iberia_0,2_en_mensuales.txt") + matrix.regr<-int.dati +} + +# toglie gli anni di dati dei WT mensili prima dell'inizio della regressione e dopo la fine della regressione, cosi' come ha fatto con i dati di prec: +monthlyWT.regr<-list() +for(cla in cla.rif)monthlyWT.regr[[cla]]<-subset(monthlyWT[[cla]],monthlyWT[[cla]][,1]>=start.regr & monthlyWT[[cla]][,1]<=end.regr) + +# toglie gli anni di dati dei WT mensili prima dell'inizio della ricostruzione e dopo la fine della ricostruzione, cosi' come ha fatto con i dati di prec: +monthlyWT.recons<-list() +for(cla in cla.rif)monthlyWT.recons[[cla]]<-subset(monthlyWT[[cla]],monthlyWT[[cla]][,1]>=start.recons & monthlyWT[[cla]][,1]<=end.recons) + +################################## PCA DEI WTS MENSILI ########################################################################################### +# per ogni classificazione e per ogni mese fa la PCA dei WTs su TUTTO il periodo ricostruito (incluso quello di misura della bonta'della ricostr.) + +if(PCRA==TRUE){ + PCA.recons<-PCA.regr<-list() + + for(cla in cla.rif){ + PCA.recons[[cla]]<-PCA.regr[[cla]]<-list() + + for(mese in 1:12){ + temp<-subset(monthlyWT.recons[[cla]],monthlyWT.recons[[cla]][,2]==mese) + + col.zeri<-c() # in ogni caso bisogna togliere tutte le colonne con WT=0 + i=1;for(w in 1:nWT[cla]){if(sum(temp[,2+w])==0){col.zeri[i]=2+w;i=i+1}} + + if(stand==TRUE){ + col.mean<-apply(temp,2,mean) + col.std<-apply(temp,2,sd) + temp.stand<-temp + for(w in 1:nWT[cla])temp.stand[,2+w]<-(temp[,2+w]-col.mean[2+w])/col.std[2+w] + temp<-temp.stand + } + + WTs<-temp[,-c(1,2,col.zeri)] # -c(1,2,col.zeri) toglie le colonne con gli anni, con i mesi e con tutti i num.uguali a zero + + # fai la S-mode PCA dei 26 tipi di tempo di quella classificazione per il periodo di calibrazione: (cioe', riduce le colonne con i tipi di tempo) + PCA.recons[[cla]][[mese]]<-princomp(WTs,cor=correl) #str(PCA.recons[[cla]][[mese]]);summary(PCA.recons[[cla]][[mese]]);plot(PCA.recons[[cla]][[mese]]) + + # nota che puoi ottenere gli scores anche con: + # WTs %*% loadings(PCA.recons[[cla]][[mese]]) e i loadings con eigen(cor(WTs))$vectors (perche' sono gli autovettori della matrice di correlazione) + + if(rotation==TRUE){ + loadings.rot<-varimax(loadings(PCA.recons[[cla]][[mese]]))$loadings # calcola i nuovi loadings ruotati + PCA.recons[[cla]][[mese]]$scores<-WTs %*% loadings.rot # calcola i nuovi scores ruotati + } + + PCA.regr[[cla]][[mese]]<-PCA.recons[[cla]][[mese]] + PCA.regr[[cla]][[mese]]$scores<-PCA.recons[[cla]][[mese]]$scores[(start.regr-start.recons+1):(end.regr-start.recons+1),] + + } # chiude il for sui mesi + } # chiude il for su cla +} # chiude l'if + +############################################# STEPWISE REGRESSION PREC vs WTs ###################################################################### +mod<-c();if(intercept==FALSE){mod=0} else {mod=1} +regr.best<-array(NA,c(n.classi,n.staz.regr,12)) # vale 1 se il miglior metodo per quella classificazione, stazione e mese e' il forward, 2 se e' il backward +r2.best<-array(NA,c(n.classi,n.staz.regr,12)) # r2 per quella classificazione, stazione e mese con il migliore dei due metodi +pred.forw<-pred.back<-array(NA,c(max.nWT,n.classi,n.staz.regr,12)) # indica quali sono i WT usati dalla regressione migliore forward e backward +n.pred.forw<-array(NA,c(n.classi,n.staz.regr,12)) # numero di predittori della migliore regressione forward stepwise 
+coeff.best<-array(NA,c(max.nWT+mod,n.classi,n.staz.regr,12)) # terra'i coefficienti dei predittori WT della regressione migliore e della intercetta (se c'e') +coeff.forw.all<-array(NA,c(num.max.pred,num.max.pred+mod,n.classi,n.staz.regr,12)) # se stepwise=FALSE, per ogni regressione con X<=num.max.pred predittori, memorizza i coefficienti della regressione forward (eventuale intercetta per prima) +media.prec.obs<-array(NA,c(n.staz.regr,12)) # indica il valor medio della precip.osservata su tutti gli anni della regressione (per poter calcolare piu' avanti la dev.snd normalizzata) +years.missing<-array(NA,c(n.staz.regr,12)) # memorizza per ogni stazione e mese il numero di anni mancanti su cui la regressione non si e' potuta fare +years.missing.list<-array(NA,c(n.staz.regr,12,tot.regr)) # memorizza QUALI sono gli anni mancanti per ogni regressione +# purtroppo il metodo della regr.lin stepwise implementato con la funzione step e' molto lento e a volte tutti i coeff.sono negativi, percio' cambio: +# uso sempre il metodo stepwise, ma scrivendolo a mano a partire dalla funzione 'nnls' che minimizza |Ax-b| per x>0 vettore in modo che i coeff. +# della regressione non siano mai negativi (nemmeno la intercetta): + +# if(interpol!="NO")land.pixels<-land.pixels[-which(land.pixels==477)] # se stiamo usando il database Ip02 devo togliere questo pixel perche'e' di mare ma viene scambiato come pixel di terra quando lo si confronta con il poligono della IP + +barraP<-winProgressBar("Stepwise Regression","Please wait...",0,n.classi*12,0) +for(mese in int.mesi){ + for(cla in cla.rif){ + # crea una matrice 'A' solo con i WTs o i PCs della classificazione e del mese scelto: + if(PCRA==TRUE){A<-PCA.regr[[cla]][[mese]]$scores} else {A<-subset(monthlyWT.regr[[cla]],monthlyWT.regr[[cla]][,2]==mese,select=3:(2+nWT[cla]))} + # aggiunge una colonna di 1 a sinistra di A corrispondente alla intercetta (termine noto): + if(intercept==T)A<-cbind(1,A) + + # cicla sulla serie da analizzare all'interno di n.staz.regr: + series<-1:n.staz.regr + if(interpol!="NO")series<-land.pixels # praticamente series diventano solo i pixel di terra + + # soprattutto d'estate puo'accadere di trovare WT con frequenza nulla per tutto il periodo di regressione, + # percio'bisogna eliminare le colonne di A corrispondenti a questi WT: + # (per esempio per il database EMULATE a luglio bisogna togliere i WTs num.4,13,14 e 23 che corrisp.ai tipi S, C.S, C.SW e A.SW, e ad agosto i num.14 e 23) + dev.stand<-apply(A,2,sd) # dev.stnd. di ogni colonna di A inclusa la eventuale intercetta per la quale la dev.stnd e' zero. + if(mod==1)dev.stand<-dev.stand[-1] # toglie l'eventuale primo elemento inutile dato dall'intercetta + WTs.para.tirar<-c();if(min(dev.stand)<0.00001)WTs.para.tirar<-which(dev.stand<0.00001) + + for(st in series){ + # stat=listaz[st] + #b<-subset(matrix.regr,matrix.regr[[1]]==st,select=2+mese)[[1]] # colonna prec. 
(seleziona solo le file di prec della serie e del mese che ci interessa): + b<-as.numeric(matrix.regr[((st-1)*tot.regr+1):(st*tot.regr),nome.mese.short[mese]]) # piu' efficiente del subset, non usa stat e lavora con matrici invece di dataframe sostituendo alla colonna del codice una colonna con il numero della serie o del pixel (occhio che non funziona con il grid Ip02, bospgna usare la rigas sopra invece) + # [nota che b coincide con la variabile prec.obs.regr introdotta piu' in basso per validare il modello] + + years.NA<-which(is.na(b)) # individua i mesi senza un valore di precipitazione [years.NA e' uguale alla variabile y.regr.NA che si calcola quando si fa la validazione del modello] + years.missing[st,mese]=length(years.NA) + years.missing.list[st,mese,]<-c(years.NA,rep(NA,tot.regr-years.missing[st,mese])) + + #if(length(!is.na(b))==0){stop("b non ha dati osservati di prec.")} + + media.prec.obs[st,mese]<-mean(b,na.rm=TRUE) # calcola la prec.media obs. eliminando gli anni con NA + tot.regr2<-tot.regr-years.missing[st,mese] # anni effettivi usati nella regressione + + if(PCRA==TRUE){max.interv<-dim(PCA.recons[[cla]][[mese]]$scores)[2]} else {max.interv<-nWT[cla]} # se stiamo facendo la PCRA dobbiamo considerare che le colonne possono essere di meno di nWT[cla] perche' abbiamo dovuto togliere le colonne con solo zeri + interv<-1:max.interv # all'inizio tutti i WT sono disponibili per la regressione stepwise; poi diminuiscono togliendo quelli gia' utilizzati + if(length(WTs.para.tirar)>0)interv<-interv[-WTs.para.tirar] + + # se per caso togliendo alcune righe di A senza dati di prec.oss si sono formate colonne con dati costanti o quasi (intercetta a parte) bisogna toglierle dall'elenco dei possibili predittori altrimenti si confondono con la intercetta e a volte i coeff.di regressione schizzano a 1000000 o piu'!!!: + if(years.missing[st,mese]!=0){ + deviaz.stand<-apply(A[-years.NA,],2,sd)[(1+mod):(max.interv+mod)] # dev.stnd. di ogni colonna di A senza le righe senza pioggia oss. + if(min(deviaz.stand)<0.00001)interv<-interv[-which(deviaz.stand<0.00001)] + } + + regr<-list();r.squared<-adj.r.squared<-adj.r.squared.best<-p.best<-c() + + for(p in interv){ # esegue una alla volta tutte le regr.lineari con una sola variabile (+ l'intecetta se c'e'), ciclando tra tutti i WTs o PCs: + A.1pred<-cbind(A[,mod],A[,mod+p]) # estrae dalla matrice A solo le colonne con i predittori, piu' l'eventuale colonna dell'intercetta + # uso la funzione lm.fit.fast per velocizzare i calcoli di 100 volte!!! Altrimenti avrei dovuto usare la vecchia funzione lm(b~.-1,data=as.data.frame(A.1pred))} # il -1 serve per togliere l'intercetta dall'algoritmo lm, perche e' gia' inclusa dentro A.1pred. 
Nota che si deve trasformare A in matrix perche' in questo caso A e' formata da un vettore, e i vettori non hanno l'attributo ncol che serve dentro la funzione nnls + if(years.missing[st,mese]==0){ + if(negative==FALSE){regr[[p]]<-nnls(as.matrix(A.1pred),b)} else {regr[[p]]<-lm.fit.fast2(A.1pred,b)} # [occhio che la funzione lm.fit e lm.fit.fast + # non accettano anni mancanti, mentre la funzione nnls si] + } else { # se ci sono anni mancanti devo toglierli perche' la funzione lm.fit.fast non accetta anni mancanti (mentre la funzione nnls si): + if(negative==FALSE){regr[[p]]<-nnls(as.matrix(A.1pred[-years.NA,]),b[-years.NA])} else {regr[[p]]<-lm.fit.fast2(A.1pred[-years.NA,],b[-years.NA])} + pr.pred<-regr[[p]]$fitted + for(i in 1:years.missing[st,mese])pr.pred<-append(pr.pred,NA,after=years.NA[i]-1) # aggiunge al vettore di prec.pred gli anni tolti perche' con prec.oss.mancante (mettendo come valore la NA); in questo modo il vettore con la prec.pred ha tanti elementi quanto gli anni su tutto il periodo di regressione + regr[[p]]$fitted<-NULL;regr[[p]]$fitted<-pr.pred + } + + if(jacknife.full==TRUE){ + prec.pred.jack<-rep(NA,tot.regr) # vettore che conterra' la prec.calcolata nel periodo di regressione con il jackknife + # cicla su tutti gli anni di regressione, compresi quelli con prec.oss = NA per calcolare la prec.prevista con il jackknife: + for(y in 1:tot.regr){ + y.regr.buttare<-c(years.NA,y) # fila corrispondenti ad anni ove la prec.oss vale NA e all'anno y da evaluare (come minimo ce n'e' sempre almeno uno, y, quindi puoi scrivere direttamente la linea di codice seguente:) + if(negative==FALSE){regr.jack<-nnls(as.matrix(A.1pred[-y.regr.buttare,]),b[-y.regr.buttare])} else {regr.jack<-lm.fit.fast2(A.1pred[-y.regr.buttare,],b[-y.regr.buttare])} + prec.pred.jack[y]<-max(0,A.1pred[y,] %*% regr.jack[[1]]) # calcola la prec.prevista per l'anno y sulla base dei coeff.appena calcolati per il periodo restante + } # chiude il for su y + regr[[p]]$fitted<-NULL;regr[[p]]$fitted<-prec.pred.jack # sostituisce invece della prec.predetta normale quella predetta con il jacknife per tutto il periodo della regressione (anche gli anni senza prec.oss) + }# chiude l'if sul jacknife + + n.pred<-1+mod # numero di predittori (includendo l'intercetta se intercept=T!) 
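+
+      # Mini-example of the nnls fit used here (package "nnls"; standalone toy data): nnls(A,b)
+      # minimises |Ax-b| subject to x>=0 and returns the coefficients in $x (the first element,
+      # i.e. regr[[p]][[1]] below) and the predictions in $fitted:
+      #   library(nnls)
+      #   A.ex <- cbind(1, runif(30)); b.ex <- 2 + 3*A.ex[,2] + rnorm(30, sd=0.1)
+      #   fit.ex <- nnls(A.ex, b.ex); fit.ex$x; fit.ex$fitted
+      # Note that the three selection criteria below are all expressed as a value to MAXIMISE:
+      # the RMSE and the CV are negated, so taking max(adj.r.squared) later picks their minimum.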
+ if(min.rmse==TRUE){ + r.squared[p]<-((sum((regr[[p]]$fitted-b)^2,na.rm=TRUE))/(tot.regr2-n.pred))^0.5 # calcola l'rmse + adj.r.squared[p]<-(-r.squared[p]) # ribalta il valore del rmse perche' cosi'poi quando prendiamo il massimo in realta' prendiamo il valore minimo, che e' quello che ci interessa + } else if(min.cv==TRUE){ + r.squared[p]<-((sum((regr[[p]]$fitted-b)^2,na.rm=TRUE))/(tot.regr2-n.pred))^0.5/media.prec.obs[st,mese] # calcola il CV [la somma dei quadrati non basta perche' poi sara'necessario aggiungere a questo valore lo step.min per vedere se fermare la regressione] + adj.r.squared[p]<-(-r.squared[p]) # ribalta il valore di r.squared ovvero del CV perche' cosi'poi quando prendiamo il massimo in realta' prendiamo il valore minimo, che e' quello che ci interessa + } else { + r.squared[p]<-cor(b,regr[[p]]$fitted,use="pairwise.complete.obs")^2 # calcola l'r2 multiplo come quadrato della correlazione tra i valori di prec.osservati e quelli predetti dalla regressione; occhio che quando la correlazione e' zero invece di mettere 0 mette NA + if(is.na(r.squared[p]))r.squared[p]=0 # vuole dire che i coeff.dei predittori sono tutti 0 tranne quello dell'intercetta + adj.r.squared[p]<-r.squared[p]-(1-r.squared[p])*n.pred/(tot.regr2-n.pred) + } # chiude l'else sul tipo di stepwise, se basato sul r2 o sul CV + } # chiude il for su p + + adj.r.squared.best[1]<-max(adj.r.squared,na.rm=T) + p.best[1]<-min(which(adj.r.squared==adj.r.squared.best[1])) + coeff.forw.all[1,,cla,st,mese]<-c(regr[[p.best]][[1]],rep(NA,num.max.pred+mod-length(regr[[p.best]][[1]]))) # coefficienti della regress.fatta con un solo predittore (quindi sono solo 2 in tutto, un coeff.per l'intercetta e uno per il predittore) + + # controlla se il modello a 1 predittore e' migliore (cioe' ha un MSE piu' basso) di quello utilizzando la prec.osservata media come predittore! + keepgoing="yes" + if(sum((media.prec.obs[st,mese]-b)^2,na.rm=T)<=sum((regr[[p.best]]$fitted-b)^2,na.rm=T)){ + keepgoing="no" + coeff.forw.all[1,,cla,st,mese]<-c(regr[[p.best]][[1]][1],rep(NA,num.max.pred+mod-1)) + pred.forw[,cla,st,mese]=rep(NA,max.nWT) # dato che non si e' trovato nessun predittore valido + n.pred.forw[cla,st,mese]<-0 # dato che non si e' trovato nessun predittore valido + } + + if(keepgoing=="yes"){ # continua solo se l'MSE con il primo predittore trovato e' migliore del MSE con il solo valor medio della prec.osservata + # fissa il WT con l'R2 adjusted piu'alto trovato ed esegue tutte le regr.lineari con due predittori: il WT selezionato ed un altro WT che cicla: + # interv<-interv[-p.best[1]] + n.pred=1+mod # numero di predittori (includendo l'intercetta se intercept=T!) 
[gia'impostato ma lo ripeto per chiarezza] + interv<-interv[-which(interv==p.best[1])] + regr<-list();r.squared<-adj.r.squared<-c() + for(p in interv){ + if(abs(cor(A[,mod+p.best[1]],A[,mod+p]))>max.autocorrelation){ + adj.r.squared[p]=-100000 # esclude questo tipo di tempo dalla lista dei possibili predittori, se e' altamente collelato con il precedente + } else { + A.2pred<-cbind(A[,mod],A[,mod+p.best[1]],A[,mod+p]) + if(years.missing[st,mese]==0){ + if(negative==FALSE){regr[[p]]<-nnls(as.matrix(A.2pred),b)} else {regr[[p]]<-lm.fit.fast2(A.2pred,b)} # [occhio che la funzione lm.fit non accetta anni mancanti, mentre la funzione nnls si]: + } else { # se ci sono anni mancanti devo toglierli perche' la funzione lm.fit non accetta anni mancanti (mentre la funzione nnls si): + if(negative==FALSE){regr[[p]]<-nnls(as.matrix(A.2pred[-years.NA,]),b[-years.NA])} else {regr[[p]]<-lm.fit.fast2(A.2pred[-years.NA,],b[-years.NA])} + pr.pred<-regr[[p]]$fitted + for(i in 1:years.missing[st,mese])pr.pred<-append(pr.pred,NA,after=years.NA[i]-1) # aggiunge al vettore di prec.pred gli anni tolti perche' con prec.oss.mancante (mettendo come valore la NA); in questo modo il vettore con la prec.pred ha tanti elementi quanto gli anni su tutto il periodo di regressione + regr[[p]]$fitted<-NULL;regr[[p]]$fitted<-pr.pred + } + + if(jacknife.full==TRUE){ + prec.pred.jack<-rep(NA,tot.regr) # vettore che conterra' la prec.calcolata nel periodo di regressione con il jackknife + # cicla su tutti gli anni di regressione, compresi quelli con prec.oss = NA per calcolare la prec.prevista con il jackknife: + for(y in 1:tot.regr){ + y.regr.buttare<-c(years.NA,y) # fila corrispondenti ad anni ove la prec.oss vale NA e all'anno y da evaluare (come minimo ce n'e' sempre almeno uno, y, quindi puoi scrivere direttamente la linea di codice seguente:) + if(negative==FALSE){regr.jack<-nnls(as.matrix(A.2pred[-y.regr.buttare,]),b[-y.regr.buttare])} else {regr.jack<-lm.fit.fast2(A.2pred[-y.regr.buttare,],b[-y.regr.buttare])} + prec.pred.jack[y]<-max(0,A.2pred[y,] %*% regr.jack[[1]]) # calcola la prec.prevista per l'anno y sulla base dei coeff.appena calcolati per il periodo restante + } # chiude il for su y + regr[[p]]$fitted<-NULL;regr[[p]]$fitted<-prec.pred.jack # sostituisce invece della prec.predetta normale quella predetta con il jacknife per tutto il periodo della regressione (anche gli anni senza prec.oss) + }# chiude l'if sul jacknife + + n.pred<-2+mod # numero di predittori + intercetta + if(min.rmse==TRUE){ + r.squared[p]<-((sum((regr[[p]]$fitted-b)^2,na.rm=TRUE))/(tot.regr2-n.pred))^0.5 # calcola l'rmse + adj.r.squared[p]<-(-r.squared[p]) # ribalta il valore di r.squared ovvero del rmse perche' cosi'poi quando prendiamo il massimo in realta' prendiamo il valore minimo, che e' quello che ci interessa + } else if(min.cv==TRUE){ + r.squared[p]<-((sum((regr[[p]]$fitted-b)^2,na.rm=TRUE))/(tot.regr2-n.pred))^0.5/media.prec.obs[st,mese] # calcola il CV + adj.r.squared[p]<-(-r.squared[p]) # ribalta il valore di r.squared ovvero del CV perche' cosi'poi quando prendiamo il massimo in realta' prendiamo il valore minimo, che e' quello che ci interessa + } else { + r.squared[p]<-cor(b,regr[[p]]$fitted,use="pairwise.complete.obs")^2 # calcola l'r2 multiplo come quadrato della correlazione tra i valori di prec.osservati e quelli predetti dalla regressione; occhio che quando la correlazione e' zero invece di mettere 0 mette NA + if(is.na(r.squared[p]))r.squared[p]=0 # vuole dire che i coeff.dei predittori sono tutti 0 tranne quello 
dell'intercetta + adj.r.squared[p]<-r.squared[p]-(1-r.squared[p])*n.pred/(tot.regr2-n.pred) + } # chiude l'else sul tipo di stepwise, se basato sul r2 o sul CV + } # chiude l'else sulla autocorrelazione tra predittori + } # chiude il for su p + + adj.r.squared.best[2]<-max(adj.r.squared,na.rm=T) + p.best[2]<-min(which(adj.r.squared==adj.r.squared.best[2])) + coeff.forw.all[2,,cla,st,mese]<-c(regr[[p.best[2]]][[1]],rep(NA,num.max.pred+mod-length(regr[[p.best[2]]][[1]]))) + + if(stepwise==TRUE){change1=0} else {change1=100} # per attivare la 2nda condizione del while INVECE della prima se si vuole fermarsi esattamente a + # tot predittori + if(min.rmse==TRUE){ + step.min=step.min.rmse + } else if(min.cv==TRUE){ + step.min=step.min.CV + } else { + step.min=step.min.r2 + } + + # controlla se il miglior R2 adjusted (o il CV) trovato con 2 predittori e' > del miglior R2 adjusted con un solo predittore; se si, continua ad aggiungere + # un predittore nuovo alla volta, finche l'R2 adjusted non aumenta piu'; se no, non entra nel while e passa a fare la regressione finale con 1 solo predittore + regr<-list() + steps=2 # sta ancora usando 2 predittori (intercetta ESCLUSA) + while(adj.r.squared.best[steps]+change1>adj.r.squared.best[steps-1]+step.min & steps<=num.max.pred){ + interv<-interv[-which(interv==p.best[steps])] # toglie i vecchi predittori gia' usati dalla lista di quelli che si possono ancora usare + steps=steps+1 # aggiungiamo ora un predittore nuovo + r.squared<-adj.r.squared<-c() + for(p in interv){ + A.xpred<-cbind(A[,mod],A[,mod+p.best],A[,mod+p]) # matrix con i predittori gia selezionati piu quello nuovo + if(max(abs(cor(A[,mod+p.best],A[,mod+p])))>max.autocorrelation){ # take the maximum value of all the correlations of the the other predictors + adj.r.squared[p]=-100000 # esclude questo tipo di tempo dalla lista dei possibili predittori, se e' altamente correlato con uno dei precedenti + } else { + if(years.missing[st,mese]==0){ + if(negative==FALSE){regr[[p]]<-nnls(as.matrix(A.xpred),b)} else {regr[[p]]<-lm.fit.fast2(A.xpred,b)} # [occhio che la funzione lm.fit non accetta anni mancanti, mentre la funzione nnls si]: + } else { # se ci sono anni mancanti devo toglierli perche' la funzione lm.fit non accetta anni mancanti (mentre la funzione nnls si): + if(negative==FALSE){regr[[p]]<-nnls(as.matrix(A.xpred[-years.NA,]),b[-years.NA])} else {regr[[p]]<-lm.fit.fast2(A.xpred[-years.NA,],b[-years.NA])} + pr.pred<-regr[[p]]$fitted + for(i in 1:years.missing[st,mese])pr.pred<-append(pr.pred,NA,after=years.NA[i]-1) # aggiunge al vettore di prec.pred gli anni tolti perche' con prec.oss.mancante (mettendo come valore la NA); in questo modo il vettore con la prec.pred ha tanti elementi quanto gli anni su tutto il periodo di regressione + regr[[p]]$fitted<-NULL;regr[[p]]$fitted<-pr.pred + } + + if(jacknife.full==TRUE){ + prec.pred.jack<-rep(NA,tot.regr) # vettore che conterra' la prec.calcolata nel periodo di regressione con il jackknife + # cicla su tutti gli anni di regressione, compresi quelli con prec.oss = NA per calcolare la prec.prevista con il jackknife: + for(y in 1:tot.regr){ + y.regr.buttare<-c(years.NA,y) # fila corrispondenti ad anni ove la prec.oss vale NA e all'anno y da evaluare (come minimo ce n'e' sempre almeno uno, y, quindi puoi scrivere direttamente la linea di codice seguente:) + if(negative==FALSE){regr.jack<-nnls(as.matrix(A.xpred[-y.regr.buttare,]),b[-y.regr.buttare])} else {regr.jack<-lm.fit.fast2(A.xpred[-y.regr.buttare,],b[-y.regr.buttare])} + 
prec.pred.jack[y]<-max(0,A.xpred[y,] %*% regr.jack[[1]]) # calcola la prec.prevista per l'anno y sulla base dei coeff.appena calcolati per il periodo restante + } # chiude il for su y + regr[[p]]$fitted<-NULL;regr[[p]]$fitted<-prec.pred.jack # sostituisce invece della prec.predetta normale quella predetta con il jacknife per tutto il periodo della regressione (anche gli anni senza prec.oss) + }# chiude l'if sul jacknife + + n.pred<-steps+mod # numero di predittori piu' uno se c'e'l'intercetta + if(min.rmse==TRUE){ + r.squared[p]<-((sum((regr[[p]]$fitted-b)^2,na.rm=TRUE))/(tot.regr2-n.pred))^0.5 # calcola l'rmse + adj.r.squared[p]<-(-r.squared[p]) # ribalta il valore di r.squared ovvero del CV perche' cosi'poi quando prendiamo il massimo in realta' prendiamo il valore minimo, che e' quello che ci interessa + } else if(min.cv==TRUE){ + r.squared[p]<-((sum((regr[[p]]$fitted-b)^2,na.rm=TRUE))/(tot.regr2-n.pred))^0.5/media.prec.obs[st,mese] # calcola il CV + adj.r.squared[p]<-(-r.squared[p]) # ribalta il valore di r.squared ovvero del CV perche' cosi'poi quando prendiamo il massimo in realta' prendiamo il valore minimo, che e' quello che ci interessa + } else { + r.squared[p]<-cor(b,regr[[p]]$fitted,use="pairwise.complete.obs")^2 # calcola l'r2 multiplo come quadrato della correlazione tra i valori di prec.osservati e quelli predetti dalla regressione; occhio che quando la correlazione e' zero invece di mettere 0 mette NA + if(is.na(r.squared[p]))r.squared[p]=0 # vuole dire che i coeff.dei predittori sono tutti 0 tranne quello dell'intercetta + adj.r.squared[p]<-r.squared[p]-(1-r.squared[p])*n.pred/(tot.regr2-n.pred) + } # chiude l'else sul tipo di stepwise, se basato sul r2 o sul CV + } # chiude l'if sulla autocorrelazione + } # chiude il for su p (il predittore) + + adj.r.squared.best[steps]<-max(adj.r.squared,na.rm=T) + p.best[steps]<-min(which(adj.r.squared==adj.r.squared.best[steps])) # se per sfiga ci sono due o + predittori con lo stesso R2 adjusted, prende solo il primo dei due + if(steps<=num.max.pred) coeff.forw.all[steps,,cla,st,mese]<-c(regr[[p.best[steps]]][[1]],rep(NA,num.max.pred+mod-length(regr[[p.best[steps]]][[1]]))) + } # chiude il while + + # numero di predittori per la migliore combinazione di predittori trovata (quella che ha R2 adjusted [o CV] piu' alto [basso] o fin quando non gli si e' imposto di fermarsi) per memorizzare i coefficienti: + n.pred.forw[cla,st,mese]<-(steps-1) # quindi tra n.pred.forw[cla,st,mese] NON e' incluso il termine dato dall'intercetta + + # lista in ordine di importanza dei predittori per la migliore combinazione trovata: + pred.forw[,cla,st,mese]=c(p.best[1:n.pred.forw[cla,st,mese]],rep(NA,max.nWT-n.pred.forw[cla,st,mese])) # nota che l'intercetta non compare mai nel vettore p.best + + } # chiude l'if sul keepgoing + + setWinProgressBar(barraP,cla+(mese-1)*n.classi,label=paste(st,"/",n.staz.regr) ) + } # chiude il for sulle stazioni (st) + } # chiude il for sulle classificazioni +} # chiude il for sui mesi +close(barraP) + +################################################################################################################################################### +### RICOSTRUZIONE CON LA LEAVE-ONE-OUT CROSS-VALIDATION DELLE PRECIPITAZIONI MENSILI ### +################################################################################################################################################### +# abbiamo gia' i coefficienti della regressione e i loro WTs usati come predittori per la classificazione di riferimento nel periodo di 
+###################################################################################################################################################
+###                          RECONSTRUCTION OF MONTHLY PRECIPITATION WITH LEAVE-ONE-OUT CROSS-VALIDATION                                       ###
+###################################################################################################################################################
+# We already have the regression coefficients and the WTs used as predictors for the reference classification over the calibration period;
+# we only need to import the WT data for the reconstruction period, convert them to monthly values and compute the predicted precipitation
+# from the coefficients of the reference regression.
+
+#Rprof() # close it with Rprof(NULL) at the end of the chunk you want to profile
+
+tot.recons<-end.recons-start.recons+1
+start.valid2<-start.valid-start.recons+1;end.valid2<-end.valid-start.recons+1 # not the starting year of the validation period, but its starting POSITION counted from the first reconstructed year
+start.regr2<-start.regr-start.recons+1;end.regr2<-end.regr-start.recons+1 # likewise, the starting POSITION of the regression period counted from the first reconstructed year
+
+#dimnames(max.pred)<-list(nome.classe,lista.recons,nome.mese)
+CV.valid<-CV.calib<-COR.valid<-COR.calib<-array(NA,c(n.classi,n.staz.regr,12,max.nWT)) # note that CV.calib computed with the jackknife is effectively a CV.valid
+pixels<-1:n.staz.regr
+if(interpol=="NO"){nome.serie<-listaz} else {nome.serie<-1:n.staz.regr}
+
+CV.calib.best<-CV.valid.best<-COR.calib.best<-COR.valid.best<-SS.CV<-SS.COR<-SS.CLI.MSE<-SS.CLI.RMSE<-SS.PER.MSE<-SS.PER.RMSE<-SS.PER2.MSE<-SS.PER2.RMSE<-CV.clim<-array(NA,c(n.classi,n.staz.regr,12),dimnames=list(nome.classe,nome.serie,nome.mese)) # for each station and month, stores the final best CV and correlation over the regression period (computed with the jackknife, if enabled)
+# beware: the first component of CV.xxxx(x,x,x,,) is the CV or COR for 0 predictors, not for 1!!!
+
+prec.true<-array(NA,c(n.classi,n.staz.regr,12,tot.recons)) # (still empty) array with the observed precipitation for each class, station and month over the whole reconstruction period
+prec.rec<-array(NA,c(n.classi,n.staz.regr,12,tot.recons)) # (still empty) array with the reconstructed precipitation for each class, station and month
+
+n.series<-length(series) # fewer than n.staz.regr, because the sea pixels are not considered; without interpolation it equals n.staz.regr
+
+barraP<-winProgressBar("Calculating Leave-one-out Cross Validation","Please wait...",0,n.staz.regr,0)
+for(st in series){
+  #st.recons=listaz[st]
+  co<-rmse<-array(NA,c(n.classi,2,12,max.nWT)) # arrays storing the correlation and the RMSE for each monthly classification, reconstruction period and number of predictors used (NA for the summer months)
+  dimnames(co)<-dimnames(rmse)<-list(nome.classe,c("calibration period","validation period"),nome.mese,1:max.nWT)
+
+  for(mese in int.mesi){
+    #dati.temp<-subset(dati.recons,dati.recons[[1]]==st.recons)
+    #start.prec.obs<-dati.temp[1,2] # first year of observed precipitation data
+
+    #prec.obs.regr<-subset(matrix.regr,matrix.regr[[1]]==st,select=2+mese)[[1]] # precipitation column (keeps only the rows of the series and month of interest):
+    prec.obs.regr<-as.numeric(matrix.regr[((st-1)*tot.regr+1):(st*tot.regr),nome.mese.short[mese]]) # observed precipitation of the chosen series and month over the regression period
+
+    y.regr.NA<-which(is.na(prec.obs.regr)) # years of the regression period without a precipitation value
+    #tot.regr2<-tot.regr-length(y.regr.NA) # years actually used in the regression
+    # if the reconstruction period starts before (or ends after) the regression period, years of NA must be added!!!
+    prec.obs.recons<-prec.obs.regr
+    if(start.recons<start.regr)prec.obs.recons<-c(rep(NA,start.regr-start.recons),prec.obs.recons)
+    if(end.recons>end.regr)prec.obs.recons<-c(prec.obs.recons,rep(NA,end.recons-end.regr))
+
+    # extract the observed precipitation of the previous month (only needed to compute SS.PER2)
+    if(mese!=1){previous.month.obs.recons<-matrix.regr[((st-1)*tot.regr+1):(st*tot.regr),1+mese ]} else {temp<-matrix.regr[((st-1)*tot.regr+1):(st*tot.regr),14];previous.month.obs.recons<-c(NA,temp[-length(temp)]) }
+    if(start.recons
+
+# TABLE: % of Iberian soil where the estimated relative contribution of a given WT to monthly precipitation is >0%:
+area.affected<-array(NA,c(12,nWT[cla.rif]),dimnames=list(nome.mese,WTs.names[[1]]))
+for(mese in int.mesi){for(wt in 1:nWT[cla.rif]) {area.affected[mese,wt]=length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>0))/length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>=0))}}
+tab.area<-t(100*round(area.affected,3)) # round and convert the values to percentages
+
+# TABLE: % of Iberian soil where the estimated relative contribution of a given WT to monthly precipitation is >15%:
+area.affected2<-array(NA,c(12,nWT[cla.rif]),dimnames=list(nome.mese,WTs.names[[1]]))
+for(mese in int.mesi){for(wt in 1:nWT[cla.rif]) {area.affected2[mese,wt]=length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>0.15))/length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>=0))}}
+tab.area2<-t(100*round(area.affected2,3)) # round and convert the values to percentages
+
+# TABLE: % of Iberian soil where the estimated relative contribution of a given WT to monthly precipitation is >30%:
+area.affected3<-array(NA,c(12,nWT[cla.rif]),dimnames=list(nome.mese,WTs.names[[1]]))
+for(mese in int.mesi){for(wt in 1:nWT[cla.rif]) {area.affected3[mese,wt]=length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>0.3))/length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>=0))}}
+tab.area3<-t(100*round(area.affected3,3)) # round and convert the values to percentages
+
+# TABLE: % of Iberian soil where the estimated relative contribution of a given WT to monthly precipitation is >50%:
+area.affected4<-array(NA,c(12,nWT[cla.rif]),dimnames=list(nome.mese,WTs.names[[1]]))
+for(mese in int.mesi){for(wt in 1:nWT[cla.rif]) {area.affected4[mese,wt]=length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>0.5))/length(which(prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]>=0))}}
+tab.area4<-t(100*round(area.affected4,3)) # round and convert the values to percentages
+
+# TABLE: Relative Intensity of each WT (i.e. table of the mean values, over every series/pixel, of coeff.WT.data, measured in 1/day)
+mean.intensity<-array(NA,c(12,nWT[cla.rif]),dimnames=list(nome.mese,WTs.names[[1]]))
+for(mese in int.mesi){for(wt in 1:nWT[cla.rif]) {mean.intensity[mese,wt]=mean(coeff.WT.data[[wt]]@data[[nome.mese.short[mese]]],na.rm=T)}}
+tab.intensity<-t(100*round(mean.intensity,3)) # round and convert the values to percentages
+
+# TABLE: Absolute Intensity of each WT (i.e. table of the mean values, over every series/pixel, of coeff.WT.abs.data)
+mean.abs.intensity<-array(NA,c(12,nWT[cla.rif]),dimnames=list(nome.mese,WTs.names[[1]]))
+for(mese in int.mesi){for(wt in 1:nWT[cla.rif]) {mean.abs.intensity[mese,wt]=mean(coeff.WT.abs.data[[wt]]@data[[nome.mese.short[mese]]],na.rm=T)}}
+tab.abs.intensity<-t(round(mean.abs.intensity,3)) # round the values
+tab.grados<-tab.abs.intensity*n.days.month[int.mesi] # number of degrees that one day of that WT adds to or removes from the month in which it occurs
+
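+# The four "area affected" tables above differ only in their threshold; a more
+# compact equivalent (illustrative sketch relying on the same objects as the
+# code above, kept inside if(FALSE) so it never runs) would loop over the
+# thresholds instead of repeating the block:
+if(FALSE){
+  thresholds <- c(0, 0.15, 0.3, 0.5)
+  tab.area.list <- lapply(thresholds, function(thr){
+    aa <- array(NA, c(12, nWT[cla.rif]), dimnames=list(nome.mese, WTs.names[[1]]))
+    for(mese in int.mesi) for(wt in 1:nWT[cla.rif]){
+      v <- prec.WT.data[[mese]]@data[[WTs.names.full[wt]]]
+      aa[mese, wt] <- length(which(v > thr)) / length(which(v >= 0))
+    }
+    t(100 * round(aa, 3)) # round and convert to percentages, as above
+  })
+  names(tab.area.list) <- paste0(">", 100*thresholds, "%")
+}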
+# SUMMARY OF ALL THE MOST INTERESTING TABLES:
+
+round(AN.day.mean[cla.rif,,],2) # frequency of each WT (num. of days/month) by month (also equal to mean.fr[,c(int.mesi)])
+round(AN.mean[cla.rif,,c(int.mesi)],1) # the same frequency, but expressed as % of days per month
+trend[,c(int.mesi)] # trend of the frequency of each WT (in days/month) over the whole chosen period, by month
+p.val[,c(int.mesi)] # p-value of the trend of the frequency of each WT (in days/month), by month
+tab.abs.intensity[,c(int.mesi)] # for temperature, it tells by how much ONE DAY of that weather type raises the MONTHLY mean temperature
+contrib3.abs # (tab.abs.intensity x freq.WT) for temperature, by how many degrees the monthly climatology rises or falls because of that WT
+tab.grados # spatial mean of the regression coefficient times 30-31 (num. of days in the month): by how many degrees one day of that WT is warmer or colder than the mean
+
+tab.intensity[,c(int.mesi)] # mean over all series (or pixels) of the WT regression coefficient divided by the mean monthly precipitation of the series (or pixel) [computed over all regression years]; each entry means that one day of that WT contributes 1% of the relative precipitation {i.e. weighted by its mean value} of the whole Iberian Peninsula
+contrib3[,c(int.mesi)] # % Relative Contribution of each WT (= mean over all series (or pixels) of the WT frequency (days/month) * the regression coefficient of the same WT / mean monthly rainfall over all regression years)
+tab.area[,c(int.mesi)] # % of the IP area with rainfall contribution > 0% for the given WT and month
+tab.area2[,c(int.mesi)] # % of the IP area with rainfall contribution > 15% for the given WT and month
+tab.area3[,c(int.mesi)] # % of the IP area with rainfall contribution > 30% for the given WT and month
+tab.area4[,c(int.mesi)] # % of the IP area with rainfall contribution > 50% for the given WT and month
+#cbind(round(CV.valid.best.medio,2),round(CV.valid.best.sys.medio,2)) # compare the monthly mean CV with and without the systematic error
+#cbind(round(COR.valid.best.medio,2),round(COR.valid.best.sys.medio,2)) # compare the monthly mean COR with and without the systematic error
+
+#save.image("C:/nicola/precipitaciones/weather_types/Ip02/Regression_Ip02_mensile_v4_step_min_cv_0,01.RData")
+#save.image("C:/nicola/precipitaciones/weather_types/RData/R_session_v154_regr_1861-2005_20th_Century_Project_(Madrid_Valencia_Lisboa).RData")
+#save.image("C:/nicola/precipitaciones/weather_types/RData/R_session_v148_como_v118_articolo1_pero_con_D_Willmott_y_MAE_y_MBE.RData")
+save.image("C:\\nicola\\temperaturas\\WTs and temp\\stepwise_MOTEDAS_1950-2010_10_WTs_umbral_0,01_absolute_PCA.RData")
+
+# draw the actual maps:
+# use the option interp="no" also for the MOTEDASP dataset, but with pch=15 (square instead of circle) and cex=.2 instead of .3
+
+my.main="Number of Predictors for each series and month"
+my.cuts<-seq(0,10,1) #;my.labels=c("0-2","3-4","5-8") # start from -1 if you also want to display the pixels with 0 predictors
+#my.cuts<-c(0,0.00001,1,2,3,10) #;my.labels=c("0-2","3-4","5-8")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+if(interpol=="NO"){
+my.map<-spplot(n.pred.forw.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,as.table=T,
+               names.attr=my.names[int.mesi],pch=15,cex=.2) #,legendEntries = my.labels)
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+} else { # when plotting a grid, the only difference is that the intervals must be set with 'at' instead of 'cuts':
+my.map<-spplot(n.pred.forw.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.6,as.table=T,
+               names.attr=my.names[int.mesi],colorkey=list(labels=list(at=my.cuts),space="bottom",width=0.6,height=0.6))
+};plot(my.map)
+
+my.main<-paste("RMSE for ",nome.classe[cla.rif])
+my.cuts<-c(0.5,1.0,1.3,1.6,3);my.labels<-c("0.5-1.0","1.0-1.3","1.3-1.6","1.6+") # for temperature
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+if(interpol=="NO"){
+my.map<-spplot(RMSE.valid.best.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,as.table=T,
+               names.attr = my.names[int.mesi],pch=15,cex=.2)#,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+}else{ # when plotting a grid, use 'at' instead of 'cuts' to set the intervals:
+my.map<-spplot(RMSE.valid.best.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi],legendEntries = my.labels)
+};plot(my.map)
+
+my.main<-paste("MBE for ",nome.classe[cla.rif])
+my.cuts<-c(-1,-0.1,0,0.1,1);my.labels<-c("-1 - -0.1","-0.1 - 0","0 - 0.1","0.1 - 1") # for temperature
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+if(interpol=="NO"){
+my.map<-spplot(MBE.valid.best.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,as.table=T,
+               names.attr = my.names[int.mesi],pch=15,cex=.2)#,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+}else{ # when plotting a grid, use 'at' instead of 'cuts' to set the intervals:
+my.map<-spplot(MBE.valid.best.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi],legendEntries = my.labels)
+};plot(my.map)
+
+my.main<-paste("CV(RMSE) for ",nome.classe[cla.rif])
+#my.cuts<-c(0,0.5,0.7,0.9,10);my.labels<-c("0-0.50","0.51-0.70","0.71-0.90","0.91+") # for precipitation
+my.cuts<-c(0,0.04,0.07,0.1,1);my.labels<-c("0-0.04","0.04-0.07","0.07-0.10","0.10+") # for temperature
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+if(interpol=="NO"){
+my.map<-spplot(CV.valid.best.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,as.table=T,
+               names.attr = my.names[int.mesi],pch=15,cex=.2)#,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+}else{ # when plotting a grid, use 'at' instead of 'cuts' to set the intervals:
+my.map<-spplot(CV.valid.best.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi],legendEntries = my.labels)
+};plot(my.map)
+
+my.main<-paste("MAE% for ",nome.classe[cla.rif])
+my.cuts<-c(0,0.05,0.1,0.2,0.3);my.labels<-c("0-5%","5-10%","10-20%","20-30%")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+CV.MAE.data.bounded<-CV.MAE.data
+CV.MAE.data.bounded@data<-as.data.frame(rescale(as.matrix(CV.MAE.data.bounded@data),my.cuts[1],last(my.cuts)))
+if(interpol=="NO"){
+my.map<-spplot(CV.MAE.data.bounded,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr = my.names[int.mesi])#,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+}else{ # when plotting a grid, use 'at' instead of 'cuts' to set the intervals:
+my.map<-spplot(CV.MAE.data.bounded,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors8),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi],colorkey=list(labels=list(at=my.cuts),space="bottom",width=0.6,height=0.6))
+};plot(my.map)
+
+my.main<-paste("Index of agreement d for ",nome.classe[cla.rif]);my.cuts<-c(0,0.99,0.994,0.997,1);my.labels<-c("0-0.99","0.99-0.994","0.994-0.997","0.997-1")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+if(interpol=="NO"){
+my.map<-spplot(d.Willmott.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr = my.names[int.mesi])#,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+}else{ # when plotting a grid, use 'at' instead of 'cuts' to set the intervals:
+my.map<-spplot(d.Willmott.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi],legendEntries = my.labels)
+};plot(my.map)
+
+my.main<-paste("Index of agreement d2 for ",nome.classe[cla.rif]);my.cuts<-c(0,0.5,0.6,0.7,1);my.labels<-c("0-0.50","0.51-0.60","0.61-0.70","0.71-1.00")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+if(interpol=="NO"){
+my.map<-spplot(d.Willmott.1.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr = my.names[int.mesi])#,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+}else{ # when plotting a grid, use 'at' instead of 'cuts' to set the intervals:
+my.map<-spplot(d.Willmott.1.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi],legendEntries = my.labels)
+};plot(my.map)
+
+my.main<-paste("Index of agreement for ",nome.classe[cla.rif]) # the 2011 version, the one we usually use
+my.cuts<-c(0.40,0.50,0.60,0.70,1);my.labels<-c("-1.00 - 0.50","0.51 - 0.60","0.61 - 0.70","0.71 - 1.00")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+d.Willmott.2011.data.bounded<-d.Willmott.2011.data
+d.Willmott.2011.data.bounded@data<-as.data.frame(rescale(as.matrix(d.Willmott.2011.data.bounded@data),my.cuts[1],last(my.cuts)))
+if(interpol=="NO"){
+my.map<-spplot(d.Willmott.2011.data.bounded,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,as.table=T,legendEntries=my.labels,
+               names.attr = my.names[int.mesi],pch=15,cex=.2)#,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+}else{ # when plotting a grid, use 'at' instead of 'cuts' to set the intervals:
+my.map<-spplot(d.Willmott.2011.data.bounded,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi],colorkey=list(labels=list(at=my.cuts),space="bottom",width=0.6,height=0.6))
+};plot(my.map)
+
+
+my.main<-"Skill Score (CV-based,model:Climatology)"
+#my.cuts<-c(-1,0,0.1,0.2,0.3,0.4,0.5,1);# my.labels<-c("0-0.50","0.51-0.70","0.71-0.90","0.91+")
+my.cuts<-c(-1,0,0.2,0.4,0.6,1)
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+my.map<-spplot(SS.CLI.RMSE.data,nome.mese.short[c(int.mesi)],layout=c(4,3),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors3(1000)),main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,
+               names.attr = my.names[int.mesi]) #,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+plot(my.map)
+
+my.main<-"Skill Score (CV-based,model:Climatology, syst.err.removed)"
+#my.cuts<-c(-1,0,0.1,0.2,0.3,0.4,0.5,1);# my.labels<-c("0-0.50","0.51-0.70","0.71-0.90","0.91+")
+my.cuts<-c(-2,0,0.2,0.4,0.6,1)
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean CV=",round(CV.best.medio[int.mesi],2),")",sep="")
+my.map<-spplot(SS.CLI.RMSE.sys.data,nome.mese.short[c(int.mesi)],layout=c(4,3),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors3(1000)),main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,
+               names.attr = my.names[int.mesi]) #,legendEntries = my.labels) # as.character(CV.best.levels[-1]))
+my.map$legend$right$args$key$points$cex <- rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+plot(my.map)
+
+my.main=paste("COR for",nome.classe[cla.rif]);my.cuts<-c(0,0.5,0.7,0.9,1);my.labels=c("0-0.50","0.51-0.70","0.71-0.90","0.91-1.00")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," mean=",round(COR.best.medio[int.mesi],2),sep="")
+my.map<-spplot(COR.calib.best.data,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr =my.names[int.mesi] ,legendEntries = my.labels) # beware: it does not like pch=19, sometimes most of the points are no longer drawn!!!
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+plot(my.map)
+
+#my.main=paste("R2 for",nome.classe[cla.rif]);my.cuts<-c(0,0.3,0.5,0.7,1);my.labels=c("0-0.30","0.31-0.50","0.51-0.70","0.71-1.00")
+my.main=paste("R2 for",nome.classe[cla.rif]);my.cuts<-c(0,0.25,0.5,0.8,1);my.labels=c("0-0.25","0.26-0.50","0.51-0.80","0.81-1.00")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," mean=",round(COR.best.medio[int.mesi],2),sep="")
+my.map<-spplot(R2.valid.best.data,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors4(1000)),sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr =my.names[int.mesi] ,legendEntries = my.labels,main=my.main)
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+plot(my.map)
+
+my.main="St.Dev.Pred.Prec / St.Dev.Obs.Prec";my.cuts<-c(0,0.5,0.7,0.85,1)
+names.attr<-nome.mese
+if(interpol=="NO"){
+my.map<-spplot(rapp.sd.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,
+               names.attr=my.names[int.mesi])
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+} else {
+#my.map<-
+};plot(my.map)
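+# The if(interpol=="NO") pattern repeated in these map blocks: with station
+# points, spplot takes the class breaks via 'cuts' (and the legend symbols are
+# resized by hand); with a grid, the breaks go in 'at' plus an explicit
+# colorkey. A stripped-down sketch, illustrative only ('obj' stands for any of
+# the layers above); kept inside if(FALSE) so it never runs:
+if(FALSE){
+  brk <- c(0, 0.5, 0.7, 0.9, 1)
+  if(interpol=="NO"){
+    p <- spplot(obj, cuts=brk, col.regions=rev(jet.colors8), pch=15, cex=.2)
+    p$legend$right$args$key$points$cex <- rep(1, length(brk)-1) # enlarge the legend symbols
+  } else {
+    p <- spplot(obj, at=brk, col.regions=rev(jet.colors8),
+                colorkey=list(labels=list(at=brk), space="bottom"))
+  }
+  plot(p)
+}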
+my.main="Normalized Mean Bias Error";my.cuts<-c(-0.1,-0.05,-0.02,-0.01,0.01,0.02,0.05,0.1)
+names.attr<-nome.mese
+if(interpol=="NO"){
+my.map<-spplot(CV.MBE.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=(jet.colors2(1000)),main=my.main,sp.layout=my.layout,as.table=T,
+               names.attr=my.names[int.mesi],pch=15,cex=.2)
+} else {
+#my.map<-
+};plot(my.map)
+
+my.main="RMSEs/RMSE";my.cuts<-seq(0,1,0.1)
+names.attr<-nome.mese
+if(interpol=="NO"){
+my.map<-spplot(proportion.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors2(1000)),main=my.main,sp.layout=my.layout,cex=0.5,as.table=T)
+} else {
+#my.map<-
+};plot(my.map)
+
+my.main="Mean Pred.Prec / Mean Obs.Prec"
+my.cuts<-c(0,0.5,0.9,0.95,0.98,0.99,0.995,1.005,1.01,1.02,1.05,1.1,2)
+my.cuts<-c(0,0.5,0.9,1.1,1.5,10)
+names.attr<-nome.mese
+if(interpol=="NO"){
+my.map<-spplot(rapp.prec.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=(jet.colors2(1000)),main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,
+               names.attr=my.names[int.mesi])
+} else {
+#my.map<-
+};plot(my.map)
+
+# maps of the relative regression coefficients for each WT (= Relative Intensity of each WT, i.e. the relative contribution in % to total precipitation of 1 day belonging to the given WT)
+# (the spatial distribution equals the relative-contribution maps, but without multiplying by the monthly WT frequency)
+for(wt in 1:nWT[cla.rif]){
+  my.main<-paste("Relative contribution of 1 day of predictor",WTs.names[[1]][wt]," to total mean monthly precipitation")
+  my.cuts<-c(0,0.01,0.1,0.2,0.3,0.5,1);my.labels<-c("0%","<1%","1-10%","11-20%","21-30%","31-50%","51%+")
+  my.colors<-c("transparent","#00FFFF","#0099F8","blue","purple")
+  #my.cuts<-c(0,0.0001,0.05,0.1,0.15,0.2,0.3,0.5,1); my.colors=c("transparent","darksalmon","darkgoldenrod1","lightgreen","deepskyblue","blue","magenta4","black")
+  windows()
+  if(interpol=="NO"){
+    my.map<-spplot(coeff.WT.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+                   cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,legendEntries=my.labels)
+  } else {
+    my.map<-spplot(coeff.WT.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+                   at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,legendEntries=my.labels)
+  }
+  plot(my.map)
+}
+
+# maps of the absolute regression coefficients for each WT (= Absolute Intensity of each WT, i.e. the absolute contribution in mm to total precipitation of 1 day belonging to the given WT)
+for(wt in 1:nWT[cla.rif]){
+  my.main<-paste("Absolute contribution of 1 day of predictor",WTs.names[[1]][wt])
+  if(predictand=="PREC"){
+    my.cuts<-c(0,0.01,1,2,10,1000); #my.labels<-c("0 mm","<1 mm","1-10 mm","11-20 mm")
+    my.colors<-rev(jet.colors5(1000)) #c("#00FFFF","#0099F8","blue","purple","magenta4","black")
+  } else {
+    my.cuts<-c(-1,-0.1,-0.01,0,0.01,0.1,1); #my.labels<-c("0 mm","<1 mm","1-10 mm","11-20 mm")
+    my.colors<-jet.colors13(1000) #c("#00FFFF","#0099F8","blue","purple","magenta4","black")
+  }
+  windows()
+  if(interpol=="NO"){
my.map<-spplot(coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+                   cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T)
+  } else {
+    my.map<-spplot(coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+                   at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,legendEntries=my.labels)
+  }
+  plot(my.map)
+}
+
+# maps of the degrees above/below the monthly mean (the climatology) associated with ONE DAY of each WT (temperature only)
+# note that a value of zero does not mean the WT has no influence on temperature, but that the weather type was not selected as a predictor
+for(wt in 1:nWT[cla.rif]){
+  my.main<-paste("Temp. shift (with respect to the monthly climatology) for a day with the WT ",WTs.names[[1]][wt])
+  my.cuts<-c(-100,-15,-10,-5,-0.000001,0.000001,5,10,15,100); my.labels<-c("< -15°","-15°,-10°","-10°,-5°","-5°,0","no pred.","0°-5°","5°-10°","10-15°","15°+")
+  #my.cuts<-c(-100,-15,-10,-5,-1,1,5,10,15,100);
+  my.colors<-jet.colors15(1000) #c("#00FFFF","#0099F8","blue","purple","magenta4","black")
+
+  jpeg(paste(dir_root,"/composiciones/",WTs.type[wt],".jpg",sep=""),width=590,height=616,quality=100)
+  #windows()
+  if(interpol=="NO"){
+    my.map<-spplot(degrees.daily.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+                   cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,pch=15,cex=.3,as.table=T,legendEntries=my.labels)
+    my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+  } else {
+    my.map<-spplot(degrees.daily.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+                   at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,legendEntries=my.labels)
+  }
+  plot(my.map)
+  dev.off()
+}
+
+
+mese=1;n.imp=6 # number of WTs to draw (in order of importance)
+my.main<-paste("Relative contribution to ",nome.mese[mese]," precipitation of the ",n.imp," most influential WTs",sep="") # ordered by highest mean contribution!!!
+my.cuts<-c(0,0.01,0.15,0.3,0.4,0.5);my.labels<-c("","1-15%","16-30%","31-50%","51%+")
+my.colors<-c("transparent","#00FFFF","#0099F8","blue","purple")
+prec.WT.data.bounded<-prec.WT.data
+for(mes in 1:12)prec.WT.data.bounded[[mes]]@data<-as.data.frame(rescale(as.matrix(prec.WT.data.bounded[[mes]]@data),my.cuts[1],last(my.cuts)))
+maxWTs<-order(-prec.WT.medio[mese,])[1:n.imp] # sort the WTs by decreasing relative contribution and keep only the ones with the highest contribution
+my.index<-WTs.names.full[maxWTs];my.names=paste(WTs.names.long[maxWTs]," ",round(100*prec.WT.medio[mese,maxWTs],1),"%",sep="")
+if(interpol=="NO"){
+my.map<-spplot(prec.WT.data.bounded[[mese]],my.index,layout=c(3,2),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,names.attr=my.names,legendEntries=my.labels)
+               #auto.key=list(space="right",title="pippo",text=my.labels,points=list(pch=21,fill=my.colors))) # unfortunately 'points' does not work!!!
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+} else {
+my.map<-spplot(prec.WT.data.bounded[[mese]],my.index,layout=c(3,2),par.settings = standard.theme(color = FALSE),
+               at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,names.attr=my.names,
+               colorkey=list(labels=list(at=my.cuts),space="right",width=0.5,height=1))
+};plot(my.map)
+
+# monthly 3x3 WT maps, ordered by direction from NW to SE:
+mese=6
+my.index<-WTs.short.names1 # use 2 for the cyclonic types, 3 for the anticyclonic types and 4 to see the constant term in the centre instead of the pure cyclonic term
+my.names<-WTs.full.names1 # same as above
+my.main<-paste("Relative contribution of WTs to total ",nome.mese[mese]," precipitation",sep="")
+my.cuts<-c(0,0.01,0.15,0.3,0.5,1);my.labels<-c("","1-15%","16-30%","31-50%","51%+")
+my.colors<-c("transparent","#00FFFF","#0099F8","blue","purple")
+if(interpol=="NO"){
+my.map<-spplot(prec.WT.data[[mese]],my.index,layout=c(3,3),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,names.attr=my.names,legendEntries=my.labels)
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+} else {
+my.map<-spplot(prec.WT.data[[mese]],my.index,layout=c(3,3),par.settings = standard.theme(color = FALSE),colorkey=list(space = "right"),
+               at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,names.attr=my.names,legendEntries=my.labels)
+}
+plot(my.map)
+
+my.main<-"WTs Relative Contribution (%) to total Observed Precipitation";
+my.cuts<-c(0,0.5,0.7,0.9,1);my.labels<-c("0-50%","51-70%","71-90%","91-100%");my.colors<-jet.colors8
+SUM.RAIN.WT.data.bounded<-SUM.RAIN.WT.data
+SUM.RAIN.WT.data.bounded@data<-as.data.frame(rescale(as.matrix(SUM.RAIN.WT.data.bounded@data),my.cuts[1],last(my.cuts)))
+if(interpol=="NO"){
+my.map<-spplot(SUM.RAIN.WT.data.bounded,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],legendEntries=my.labels)
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+} else { # when plotting a grid, the difference is that the intervals must be set with 'at' instead of 'cuts', and colorkey puts the legend at the bottom:
+my.map<-spplot(SUM.RAIN.WT.data.bounded,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),
+               at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],
+               colorkey=list(labels=list(at=my.cuts),space="bottom",width=0.6,height=0.6))
+};plot(my.map)
+
+my.main<-"WTs total Contribution to Observed Precipitation"; # i.e. the climatology of the WTs, without the constant term
+my.cuts<-c(seq(0,300,20),350,400,500);my.colors<-jet.colors8
+if(interpol=="NO"){
+my.map<-spplot(SUM.RAIN.WT.abs.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],legendEntries=my.labels)
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+} else { # when plotting a grid, the difference is that the intervals must be set with 'at' instead of 'cuts', and colorkey puts the legend at the bottom:
+my.map<-spplot(SUM.RAIN.WT.abs.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),colorkey=list(space = "bottom"),
+               at=my.cuts,col.regions=rev(jet.colors(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],legendEntries=my.labels)
+};plot(my.map)
+
+my.main<-"WTs total Contribution to Observed Precipitation"; # i.e. the climatology of the WTs, WITH the constant term
+my.cuts<-c(seq(0,300,20),350,400,500);my.colors<-jet.colors8
+if(interpol=="NO"){
+my.map<-spplot(SUM.RAIN.WT.abs.full.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+               cuts=my.cuts,col.regions=rev(jet.colors(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],legendEntries=my.labels)
+my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+} else { # when plotting a grid, the difference is that the intervals must be set with 'at' instead of 'cuts', and colorkey puts the legend at the bottom:
+my.map<-spplot(SUM.RAIN.WT.abs.full.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),colorkey=list(space = "bottom"),
+               at=my.cuts,col.regions=rev(jet.colors(1000)),main=my.main,sp.layout=my.layout,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],legendEntries=my.labels)
+};plot(my.map)
+
+
+### display all the monthly maps of a chosen WT in a 4x4 composition, with the pattern of the pressure fields associated with the WT in the centre:
+datos.SLP<-read.table(file.SLP,header=FALSE,stringsAsFactors=FALSE,na.strings="-999") # daily pressure database in the format year/month/day/pixel pressure values in columns
+datos.SLP<-subset(datos.SLP,datos.SLP[,1]>=begin.analisi & datos.SLP[,1]<=end.analisi) # drop the years outside the chosen analysis period
+listaWT.used<-listaWT[[cla.rif]]
+listaWT.used<-subset(listaWT[[cla.rif]],listaWT[[cla.rif]][[1]] >=begin.analisi & listaWT[[cla.rif]][[1]] <=end.analisi)
+WorldPoly<- readShapePoly('C:/Projecto_Master/Limites Mundo/TM_WORLD_BORDERS-0.2.shp')
+proj4string(WorldPoly) <- CRS("+proj=longlat")
+my.polygon=list('sp.polygons',WorldPoly,first=F)
+my.layout=list(my.polygon)
+npx.ori<-dim(datos.SLP)[2]-3
+gt = GridTopology(cellcentre.offset = angolo.basso.sinistra, cellsize = c(deg,deg), cells.dim = c(npx.ori, npx.ver))
+grd = SpatialGrid(gt)
+proj4string(grd) <- CRS("+proj=longlat +datum=WGS84")
+
+my.names<-nome.mese
+my.cuts<-c(0,0.01,0.15,0.3,0.5,1)
+#my.cuts<-c(0.1,3,10,20,100)^(1/3)
+my.labels<-c("1%","15%","30%","50%","100%")
+#my.labels<-c("1 mm","3%","10%","20%","100%")
+my.cuts.in.leyend<-c(0.01,0.15,0.30,0.50,1)
+my.colors<-c("transparent","#00FFFF","#0099F8","blue","purple")
+
+for(wt in WTs.type){ # loop over all the WTs
+  #e.g.: wt="NE" # choose the weather type to map if you skip the for loop
+  wt.num<-which(WTs.type==wt) # convert the wt letter code into a number from 1 to 26
+  my.title<-paste("Relative contribution of WT: ",wt," to total monthly precipitation",sep="") # not used
+  my.map<-list()
+
+  if(interpol=="NO"){ # beware: this case has not been verified yet!!!
+    my.map<-spplot(prec.mese.data[[wt.num]],nome.mese.short[mese],par.settings = standard.theme(color = FALSE),key.space = "right",
+                   cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,names.attr=my.names,legendEntries=my.labels)
+    my.map$legend$right$args$key$points$cex <-rep(1,length(my.cuts)-1) # change the size of the legend symbols!!!
+  } else {
+    for(mese in int.mesi){
+      #my.main<-list(label=paste(nome.mese[mese]," ",round(prec.WT.medio[mese,wt.num]*100,1),"%",sep=""),cex=1)
+      my.main<-list(label=nome.mese[mese],cex=1)
+      my.map[[mese]]<-spplot(prec.mese.data[[wt.num]],nome.mese.short[mese],main=my.main,par.settings = standard.theme(color = FALSE),
+                             at=my.cuts,col.regions=my.colors,sp.layout=my.layout,cex=.5,names.attr=my.names[mese],legendEntries=my.labels,colorkey=F)
+    }
+  }
+
+  # add the legend below the August map:
+  mese=8;my.main<-list(label=nome.mese[mese],cex=1)
+  my.map[[mese]]<-spplot(prec.mese.data[[wt.num]],nome.mese.short[mese],main=my.main,par.settings = standard.theme(color = FALSE),
+                         at=my.cuts,col.regions=my.colors,sp.layout=my.layout,cex=.5,names.attr=my.names[mese],colorkey=list(space="bottom",width=0.4,height=3,labels=list(at=my.cuts.in.leyend,labels=my.labels)))
+
+  # table with the variables from January to December:
+  #tab.lateral<-data.frame(cbind("Precipit."=contrib[wt.num,],"Frequency"=round(AN.mean[cla.rif,wt.num,],1),"Intensity"=tab.intensity[WTs.type[wt.num],],
+  #                              "Area IP"=tab.area[WTs.type[wt.num],],"Area.15"=tab.area2[WTs.type[wt.num],],"Area.30"=tab.area3[WTs.type[wt.num],],
+  #                              "Area.50"=tab.area4[WTs.type[wt.num],]),row.names=nome.mese.short)
+  tab.lateral<-t(data.frame(cbind("Precipitation"=contrib[wt.num,],"Frequency"=round(AN.mean[cla.rif,wt.num,],1),
+                                  "Area"=tab.area[WTs.type[wt.num],]),row.names=nome.mese.short))
+
+  # use the daily WT classification chosen at the beginning and the daily pressure fields of the file.SLP database (in standard format), from which
+  # the classification in use was downscaled, and convert them into maps of the mean pressure associated with each WT over the whole study area of the pressure database:
+  datos.SLP2<-datos.SLP
+  datos.SLP2$WT<-rep(listaWT.used[[4]],each=npx.ver) # append to datos.SLP2 a column with the corresponding weather type
+
+  pressure.WT.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,npx.ori*npx.ver)), proj4string = CRS("+proj=longlat +datum=WGS84"))
+
+  datos.temp<-datos.SLP2
+  datos.temp<-subset(datos.temp,datos.temp$WT==idWT[[cla.rif]][wt.num])
+  n.dias<-dim(datos.temp)[1]/npx.ver
+  mean.pressure<-data.frame(array(0,c(npx.ver,npx.ori)))
+  for(d in 1:n.dias){
+    mean.pressure<-mean.pressure+datos.temp[(npx.ver*(d-1)+1):(npx.ver*d),4:(3+npx.ori)]
+  }
+  mean.pressure<-mean.pressure/n.dias # divide to get the mean instead of the sum
+  # load all the computed means into pressure.grid:
+  pressure.WT.grid@data[[WTs.type[wt.num]]]=as.numeric(t(mean.pressure))
+
+  # pressure fields without the coloured grid underneath, only blue contour lines:
+  #spplot(pressure.WT.grid,wt,sp.layout=list(my.polygon,list("sp.lines",res)),contour=TRUE,col='blue',labels=F,region=F,
+  #       main=my.main,xlim=c(-20,10),ylim=c(30,50)) # pressure-field map of the chosen WT
+  # pressure fields with a red/blue grid and white contour lines:
+  my.main=list(label=WTs.names.long[wt.num],cex=1.1)
+  my.WT.map<-spplot(pressure.WT.grid,wt,sp.layout=my.layout,contour=TRUE,col='gray',region=FALSE,labels=FALSE,
+
cuts=30,lwd=1.5,col.regions=rev(jet.colors(1000)),main=my.main,xlim=c(-35,25),ylim=c(25,60)) # colorkey=list(space = "bottom"),cuts=30,
+  # pressure fields with a pink/white/light-blue grid and grey contour lines:
+  # my.WT.map<-spplot(pressure.WT.grid,wt,sp.layout=my.layout,contour=TRUE,col='gray',labels=F,
+  #                   colorkey=list(space = "bottom"),main=my.main,xlim=c(-20,10),ylim=c(30,50)) # pressure-field map of the chosen WT
+
+
+  # save all the compositions for each WT:
+  #jpeg(paste(dir_root,"/mapas/composiciones_EMULATE_small_table/",wt,".jpg",sep=""),width=590,height=616,quality=100) # other possible formats to save the plot: pdf, bmp, jpeg, tiff.
+  jpeg(paste(dir_root,"/composiciones/",wt,".jpg",sep=""),width=590,height=616,quality=100)
+
+  #windows(width=7, height=9)
+  lx=0.25 # horizontal size of a small map
+  ly=0.25 # vertical size of a small map
+  g1=0.74 # height of the first row
+  g3=0.53 # height of the second row
+  g8=0.32 # height of the third row
+  g5=0.11 # height of the fourth row
+  #textplot(as.matrix(tab.lateral),cex=1,halign="center",valign="bottom",mar=c(0,0,0,0),cmar=1)
+  textplot(WTs.type[wt.num],cex=7,halign="center",valign="center",mar=c(0,0,0,0))
+
+  print(my.map[[1]], position= c(0,g1,0+lx,g1+ly),more=T) # see ?print.trellis for how to split the device among several spplot panels
+  print(my.map[[2]], position= c(0.25,g1,0.25+lx,g1+ly),more=T) # (xmin, ymin, xmax, ymax)
+  print(my.map[[3]], position= c(0.50,g1,0.50+lx,g1+ly),more=T)
+  print(my.map[[4]], position= c(0.75,g1,0.75+lx,g1+ly),more=T)
+  print(my.map[[5]], position= c(0.75,g3,0.75+lx,g3+ly),more=T)
+  print(my.map[[6]], position= c(0.75,g8,0.75+lx,g8+ly),more=T)
+  print(my.map[[7]], position= c(0.75,g5,0.75+lx,g5+ly),more=T)
+  print(my.map[[8]], position= c(0.50,g5-0.037,0.50+lx,g5+ly),more=T)
+  print(my.map[[9]], position= c(0.25,g5,0.25+lx,g5+ly),more=T)
+  print(my.map[[10]], position= c(0,g5,0+lx,g5+ly),more=T)
+  print(my.map[[11]], position= c(0,g8,0+lx,g8+ly),more=T)
+  print(my.map[[12]], position= c(0,g3,0+lx,g3+ly))
+  #print(my.WT.map,position = c(0.25,0.35,0.75,0.93),more=T)
+
+  #or=4;ve=5 # number of panels horizontally and vertically where to place the maps
+  # print(my.map[[1]], split= c(1,1,or,ve),more=T) # see ?print.trellis for how to split the device among several spplot panels
+  # print(my.map[[2]], split= c(2,1,or,ve),more=T)
+  dev.off()
+
+} # closes the for over the mapped WT
+
+
+#save.image(paste(dir_root,"/RData/R_session_v133_por_pixels_Composicion_EMULATE_corregido_before_regr.RData",sep=""))
+
+# other maps:
+if(PCRA==F){nombre="WT"} else {nombre="PC"}
+cla.levels<-seq(0,n.classi,1)
+cla.name="Best classification for each station and month"
+regr.levels<-c(0,1,2)
+#regr.name="Best stepwise method for each station and month \n (blue: forward; red: backward)"
+regr.name="Best stepwise method for each station and month"
+pred1.name<-paste("Most important",nombre,"of ",nome.classe[cla.rif])
+pred2.name<-paste("2nd Most important",nombre,"of ",nome.classe[cla.rif])
+pred1.levels<-seq(0,nWT[cla.rif])
+COR.name="COR of the best classification for each station and month";COR.levels<- c(0,0.25,seq(0.3,1,0.05))
+prec.obs.name<-"Observed Mean Monthly Precipitation during Regression period";prec.levels<-c(seq(0,300,50),1000)
+intercept.name<-"Regression Constant Term";intercept.levels<-c(seq(0,100,5),150)
+intercept.perc.name<-"Relative contribution of Constant Term to total precipitation";intercept.perc.levels<-prec.levels
+
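+# How the composition above tiles several lattice/spplot panels on one page:
+# each print call gets position=c(xmin,ymin,xmax,ymax) in [0,1] device
+# coordinates, with more=TRUE for every panel except the last one, which
+# flushes the page. Tiny self-contained sketch with toy lattice plots, kept
+# inside if(FALSE) so it never runs:
+if(FALSE){
+  library(lattice)
+  p1 <- xyplot(1:10 ~ 1:10); p2 <- xyplot(10:1 ~ 1:10)
+  print(p1, position=c(0, 0.5, 1, 1), more=TRUE) # top half of the device
+  print(p2, position=c(0, 0, 1, 0.5))            # bottom half, closes the page
+}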
+print(spplot(prec.obs.data,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",cuts=prec.levels,col.regions=rev(jet.colors2(1000)),main=prec.obs.name,sp.layout=my.layout,cex=0.5,as.table=T))
+print(spplot(pred1.forw.data,nome.mese.short[int.mesi],layout=c(3,2),par.settings = standard.theme(color = FALSE),key.space = "right",checkEmptyRC=F,identify=F,cuts=pred1.levels,col.regions=jet.colors.pred[[cla.rif]],main=pred1.name,sp.layout=my.layout,pch=20,cex=0.7,as.table=T,legendEntries=WTs.names[[cla.rif]]))
+print(spplot(pred2.forw.data,nome.mese.short[c(int.mesi)],layout=c(3,2),par.settings = standard.theme(color = FALSE),key.space = "right",cuts=pred1.levels,col.regions=jet.colors.pred[[cla.rif]],main=pred2.name,sp.layout=my.layout,pch=20,cex=0.7,as.table=T,legendEntries=WTs.names[[cla.rif]]))
+spplot(intercept.data,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",at=intercept.levels,col.regions=rev(jet.colors2(1000)),main=intercept.name,sp.layout=my.layout,pch=19,cex=0.5,as.table=T)
+print(spplot(intercept.perc.data,nome.mese.short[c(int.mesi)],layout=c(3,2),par.settings = standard.theme(color = FALSE),key.space = "right",cuts=intercept.perc.levels,col.regions=prec.perc.colors,main=intercept.perc.name,sp.layout=my.layout,pch=19,cex=0.5,as.table=T))
+
+
+# useful spplot parameters:
+# par.settings = list(axis.line = list(col = 'transparent')) removes the borders from the panels (but not from the titles)
+# par.settings = c(standard.theme(color = FALSE), list(par.xlab.text = list(cex = 5, col = "blue"))) changes the colour of the title box (or removes it with 'transparent')
+# names.attr<-c("...","...", ... ) changes the names inside the pink panel titles
+}
+
+if(interpol!="NO"){################ MONTHLY GRID OF THE COMPUTED POINT VARIABLES #############################################################################
+# you only need to set below the VARIABLE to interpolate into infofile:
+# load into infofile the monthly values of the chosen variable for the given year (or the whole climatology directly, if interpol="CLI"):
+for(mese in int.mesi)infofile[[nome.mese.short[mese]]]<-n.pred.forw[cla.rif,,mese] #WARNING: there must be no months with missing-data sentinels (e.g. -999); only NA is allowed!
+# for(mese in int.mesi)infofile[[nome.mese.short[mese]]]<-SUM.RAIN.WT.abs.data[[nome.mese.short[mese]]] #WARNING: there must be no months with missing-data sentinels (e.g. -999); only NA is allowed!
+# for(mese in int.mesi)infofile[[nome.mese.short[mese]]]<-MBE.norm.data[[nome.mese.short[mese]]] #WARNING: there must be no months with missing-data sentinels (e.g. -999); only NA is allowed!
+# for(mese in int.mesi)infofile[[nome.mese.short[mese]]]<-proportion.data[[nome.mese.short[mese]]] #WARNING: there must be no months with missing-data sentinels (e.g. -999); only NA is allowed!
+# wt="A";for(mese in int.mesi)infofile[[nome.mese.short[mese]]]<-prec.WT.abs.data[[mese]][[wt]] # in this case you must also select a WT
+#for(mese in int.mesi)infofile[[nome.mese.short[mese]]]<-CV.MAE.data[[nome.mese.short[mese]]] #WARNING: there must be no months with missing-data sentinels (e.g. -999); only NA is allowed!
+#for(mese in int.mesi)infofile[[nome.mese.short[mese]]]<-d.Willmott.2011.data[[nome.mese.short[mese]]] #WARNING: there must be no months with missing-data sentinels (e.g. -999); only NA is allowed!
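+# The WARNING repeated above: any -999 style missing-data sentinel must be
+# converted to NA before interpolation. A minimal, hypothetical one-liner for
+# a data frame df of monthly columns (illustrative only, kept inside if(FALSE)):
+if(FALSE){
+  df[df == -999] <- NA # replace every sentinel value with NA in one pass
+}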
+n.dec=3 # number of decimals kept for the chosen variable after the interpolation
+
+# look for 2 or more series sharing the same coordinates; in that case, shift them slightly north so that kriging is possible:
+if(dim(zerodist(infofile))[1]>0)print(paste("Found",dim(zerodist(infofile))[1],"coincident series, which were shifted slightly north"))
+lonlat<-infofile@coords # matrix with the lon+lat of each station # equivalently: lonlat<-paste(coordinates(infofile)[,1],coordinates(infofile)[,2])
+nlonlat<-dim(lonlat)[1]
+if(proyected==FALSE){small.shift<-0.00001}else{small.shift<-1}
+#n.coinc<-0
+for(st in 1:nlonlat){
+  ss<-which(lonlat[,1]==lonlat[st,1] & lonlat[,2]==lonlat[st,2]) # all the rows of the table with the same lon and lat as row st (there should be only one)
+  nss<-length(ss) # if it equals 1, everything is fine
+  if(nss>1){
+    #print(paste(st,listaz[st],ss[1],ss[2])) # debug line; normally there are 2 stations in the same position
+    for(k in 2:nss)infofile@coords[ss[k],2]=infofile@coords[ss[k],2]+small.shift*(k-1) # increase the latitude by k hundred-thousandths of a degree, or by k metres (excluding the first station of the group)
+    #n.coinc<-n.coinc+1 #print(n.coinc)
+  }
+}
+
+pixel.size=10
+
+# create a generic km-based grid in UTM30 overlapping the whole Iberian Peninsula:
+gt = GridTopology(cellcentre.offset = c(-70000, 3980000), cellsize = c(1000*pixel.size,1000*pixel.size), cells.dim = c(ceiling(1110/pixel.size), ceiling(890/pixel.size)))
+grd = SpatialGrid(gt)
+n.pixels<-grd@grid@cells.dim[1]*grd@grid@cells.dim[2]
+proj4string(grd) <- CRS("+proj=utm +zone=30 +datum=WGS84") # +ellps=clrk66")
+#summary(grd);plot(IpPoly);plot(grd,add=T) # display the grid overlapped on the IP
+
+# create an empty grid (Var.grid) to hold the interpolation results for the variable of interest
+Var.grid<-void.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.pixels)), proj4string = CRS("+proj=utm +zone=30 +datum=WGS84"))
+
+# interpolate the selected months of the year
+# store the result in Var.grid and round the predicted values to the n.dec-th decimal digit, so they take less space if later saved to a .txt (the rounding error is negligible):
+formul<-c(Jan~1,Feb~1,Mar~1,Apr~1,May~1,Jun~1,Jul~1,Aug~1,Sep~1,Oct~1,Nov~1,Dec~1)
+for(mese in int.mesi){
+  print(paste("Month:",mese)) # so if it warns that the variogram is a singular model, you know which month and year it refers to
+  v <- variogram(formul[[mese]], infofile)
+  #plot(v,type='b') # to see the points of the sample variogram; type='b' also draws a broken line between the points, but it is not the fit
+  #v.fit <- fit.variogram(v, model=vgm(psill=8000, model='Gau', range=200000, nugget=500)) # automatically fits all the curve parameters; approximate initial values are enough!!!!!
+  temp.nugget<-v$gamma[1] # initial nugget value to start from; the fit.variogram algorithm then adjusts the values
+  temp.sill<-last(v$gamma) # as initial values I used those that can be read 'by eye' from the sample variogram v
+  temp.range<-last(v$dist)
+  v.fit <- fit.variogram(v, model=vgm(psill=temp.sill-temp.nugget, model='Sph', range=temp.range, nugget=temp.nugget))
+
+  #v.fit # to inspect the fitted parameters
+  print(plot(v,v.fit)) # to see the sample variogram plus the fitted curve #b <- mean(DistData[[i]][[j+1]]) # only for simple kriging
+  #Prec.grid@data[[mese]]<-round(krige(formula=formul[[mese]], locations=infofile.sinNA, newdata=grd, model=v.fit,nmax=int.nmax)@data$var1.pred,1) # if kriging misbehaves, check whether a bad fitted curve is to blame; maybe try a shorter fit range.
+  Var.grid@data[[nome.mese.short[mese]]]<-round(krige(formula=formul[[mese]], locations=infofile,na.action=na.omit, newdata=grd, model=v.fit,nmax=int.nmax)@data$var1.pred,n.dec) # as above, but it also accepts inputs with stations without data (NA), because na.omit excludes them from the interpolation!!!
+}
+
+# select the sea pixels with overlay and set them to NA, so they are not drawn in the final map:
+sea.pixels<-which(is.na(overlay(grd,IpPoly))==T)
+land.pixels<-(1:n.pixels)[-sea.pixels]
+for(mese in 1:12)Var.grid@data[[nome.mese.short[mese]]][sea.pixels]=NA
+
+# (end of interpolation)
+
+# draw the grid of the chosen variable:
+
+my.name="CV(RMSE)" ;my.cuts<-c(0,0.5,0.7,0.9,10);my.labels<-c("0-0.50","0.51-0.70","0.71-0.90","0.91+")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=rev(jet.colors8),main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi],legendEntries = my.labels)
+
+int.mesi=c(10:12,1:5)
+my.name="CV(MAE)" ;my.cuts<-c(0,0.05,0.10,0.15,1);my.labels<-c("0-5%","6-10%","11-15%",">16%")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=rev(jet.colors8),main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi],legendEntries = my.labels)
+
+my.name="d of Willmott definition 1" ;my.cuts<-c(0,0.99,0.994,0.997,1); #my.labels<-c("0-5%","6-10%","11-15%",">16%")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=jet.colors8,main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi]) #,legendEntries = my.labels)
+
+my.name="d of Willmott definition 2" ;my.cuts<-c(0,0.5,0.6,0.7,1); #my.labels<-c("0-5%","6-10%","11-15%",">16%")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=jet.colors8,main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi]) #,legendEntries = my.labels)
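+# The variogram/kriging loop above in miniature: the standard gstat workflow is
+# sample variogram -> fit.variogram with rough initial values -> krige onto a
+# grid. Illustrative sketch using the meuse example data shipped with sp/gstat,
+# kept inside if(FALSE) so it never runs:
+if(FALSE){
+  library(sp); library(gstat)
+  data(meuse); coordinates(meuse) <- ~x+y
+  data(meuse.grid); gridded(meuse.grid) <- ~x+y
+  v <- variogram(log(zinc) ~ 1, meuse) # sample variogram
+  v.fit <- fit.variogram(v, vgm(psill=tail(v$gamma,1), model="Sph",
+                                range=tail(v$dist,1), nugget=v$gamma[1])) # 'by eye' initial values, as above
+  kr <- krige(log(zinc) ~ 1, meuse, meuse.grid, model=v.fit) # ordinary kriging
+  spplot(kr["var1.pred"]) # map of the kriged predictions
+}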
+my.name="d of Willmott definition 3" ;my.cuts<-c(-1,0.56,0.64,0.72,1); #my.labels<-c("0-5%","6-10%","11-15%",">16%")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=jet.colors8,main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi]) #,legendEntries = my.labels)
+
+my.name="R2" ;my.cuts<-c(0,0.3,0.5,0.7,1);my.labels=c("0-0.30","0.31-0.50","0.51-0.70","0.71-1.00")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi],legendEntries = my.labels)
+
+my.name="RMSEs/RMSEn" ;my.cuts<-c(0,0.5,0.7,0.8,0.9,1.0) #;my.labels=c("0-2","3-5","6-8")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(3,3),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi],legendEntries = my.labels)
+
+my.name="St.Dev. pred / obs" ;my.cuts<-c(0,0.5,0.7,0.8,0.9,1.1) #;my.labels=c("0-2","3-5","6-8")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(3,3),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi],legendEntries = my.labels)
+
+my.name="Number of Predictors for each series and month" ;my.cuts<-seq(0,10);my.labels=c("0-2","3-5","6-8")
+my.names<-nome.mese #paste(nome.mese.short[int.mesi]," (mean=",round(n.pred.forw.medio[int.mesi],1),")",sep="")
+
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+       at=my.cuts,col.regions=rev(jet.colors4(1000)),main=my.name,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,
+       names.attr=my.names[int.mesi],legendEntries = my.labels)
+
+my.main<-"WTs Relative Contribution to Observed Precipitation";
+my.cuts<-c(0,0.3,0.4,0.5,0.6,0.7,0.8,0.9,5);my.labels<-c("0-30%","31-40%","41-50%","51-60%","61-70%","71-80%","81-90%","91-100%")
+my.palette<-colorRampPalette(c("red","yellow","cyan","blue"))
+my.colors<-my.palette(1000)
+spplot(Var.grid,nome.mese.short[c(int.mesi)],layout=c(2,4),par.settings = standard.theme(color = FALSE),colorkey=list(space = "right"),
+       at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,names.attr=nome.mese[int.mesi],legendEntries=my.labels)
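+# colorRampPalette (used throughout these maps) returns a palette *function*:
+# calling it with n yields n interpolated colours, which is why my.palette(1000)
+# appears above and below. Minimal sketch, kept inside if(FALSE) so it never runs:
+if(FALSE){
+  pal <- colorRampPalette(c("red","yellow","cyan","blue"))
+  pal(5) # five colours along the red->blue ramp
+  barplot(rep(1,100), col=pal(100), border=NA, space=0, axes=FALSE) # preview the ramp
+}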
"cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")) +my.palette<-jet.colors.clim +my.colors<-rev(my.palette(1000)) +spplot(Var.grid,nome.mese.short[1:12],layout=c(3,4),par.settings = standard.theme(color = FALSE), + at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,names.attr=nome.mese[1:12],colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend))) + +my.main<-"WTs Predicted Climatology with constant term"; +my.cuts<-c(seq(0,50,1),seq(52.5,150,2.5),seq(155,200,5),seq(210,290,20),400);#my.labels<-c("0-30%","41-50","41-50","51-70%","61-70%","71-80%","81-90%","91-100%") +my.prec.levels.in.leyend<-c(0,25,50,75,100,150,200,250,300,350,400) +jet.colors.clim <-colorRampPalette(c("violet","blue", "#007FFF", "cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")) +my.palette<-jet.colors.clim +my.colors<-rev(my.palette(1000)) +spplot(Var.grid,nome.mese.short[1:12],layout=c(3,4),par.settings = standard.theme(color = FALSE), + at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,pch=19,cex=0.3,as.table=T,names.attr=nome.mese[1:12],colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend))) + +# visualizza una composizione per il WT interpolato con il suo contributo medio mensile alla prec "climatologia del WT" +my.names<-nome.mese +my.cuts<- c(seq(1,30,1),35,40,50,70,100) # fa il log o la radice per diminuire le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! +my.labels=c(1,10,20,50,100) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici +my.cuts.in.leyend<-my.labels # fa il log o la radice +Var.grid.temp<-Var.grid +ss<-which(Var.grid@data>100,arr.ind=T);ss1<-ss[,1];ss2<-ss[,2] # imposta a 100 tutti i pixel sopra 100 per non visualizzare una legenda distorta +if(dim(ss)[1]>0){for(s in 1:dim(ss)[1])Var.grid.temp@data[ss1[s],ss2[s]]=100} + +lx=0.25 # lunghezza orizzontale di un mapa piccolo +ly=0.25 # lunghezza verticale di un mapa piccolo +g1=0.74 # altezza prima fila +g3=0.53 # altezza seconda fila +g8=0.32 # altezza terza fila +g5=0.11 # altezza quarta fila + +my.map<-list() +for(mese in 1:12){ + my.main<-list(label=nome.mese[mese],cex=1) + my.map[[mese]]<-spplot(Var.grid.temp,nome.mese.short[mese],main=my.main,par.settings = standard.theme(color = FALSE), + at=my.cuts,col.regions=rev(jet.colors12(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names[mese],legendEntries=my.labels,colorkey=FALSE) +} + +# sotto la mappa di agosto aggiunge la legenda: +mese=8;my.main<-list(label=nome.mese[mese],cex=1) +my.map[[mese]]<-spplot(Var.grid.temp,nome.mese.short[mese],main=my.main,par.settings = standard.theme(color = FALSE), + at=my.cuts,col.regions=rev(jet.colors12(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names[mese],colorkey=list(space="bottom",width=0.5,height=3,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + +jpeg(paste("C:/nicola/precipitaciones/weather_types/mapas/14) composiciones_MOPREDAS_WTs_climatology_over_1_mm_leyenda_bonita/",wt,".jpg",sep=""),width=590,height=616,quality=100) # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. +#textplot(paste(WTs.type[wt],"Mean Rainfall Amount") ,cex=1,halign="center",valign="center",mar=c(0,0,0,0)) +textplot(paste(wt,"MOPREDAS Clim. 
+jpeg(paste("C:/nicola/precipitaciones/weather_types/mapas/14) composiciones_MOPREDAS_WTs_climatology_over_1_mm_leyenda_bonita/",wt,".jpg",sep=""),width=590,height=616,quality=100) # other formats available to save the plot: pdf, bmp, jpeg, tiff.
+#textplot(paste(WTs.type[wt],"Mean Rainfall Amount") ,cex=1,halign="center",valign="center",mar=c(0,0,0,0))
+textplot(paste(wt,"MOPREDAS Clim. > 1 mm") ,cex=1.5,halign="center",valign="center",mar=c(0,0,0,0))
+print(my.map[[1]], position= c(0,g1,0+lx,g1+ly),more=T) # see ?print.trellis for how to split the screen among several spplots
+print(my.map[[2]], position= c(0.25,g1,0.25+lx,g1+ly),more=T) # (xmin, ymin, xmax, ymax)
+print(my.map[[3]], position= c(0.50,g1,0.50+lx,g1+ly),more=T)
+print(my.map[[4]], position= c(0.75,g1,0.75+lx,g1+ly),more=T)
+print(my.map[[5]], position= c(0.75,g3,0.75+lx,g3+ly),more=T)
+print(my.map[[6]], position= c(0.75,g8,0.75+lx,g8+ly),more=T)
+print(my.map[[7]], position= c(0.75,g5,0.75+lx,g5+ly),more=T)
+print(my.map[[8]], position= c(0.50,g5-0.038,0.50+lx,g5+ly),more=T)
+print(my.map[[9]], position= c(0.25,g5,0.25+lx,g5+ly),more=T)
+print(my.map[[10]], position= c(0,g5,0+lx,g5+ly),more=T)
+print(my.map[[11]], position= c(0,g8,0+lx,g8+ly),more=T)
+print(my.map[[12]], position= c(0,g3,0+lx,g3+ly),more=T)
+dev.off()
+
+
+} # closes the if on interpol!="NO"
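+# The twelve print() positions above trace a clockwise ring of panels around the
+# page. The same layout can be generated instead of hard-coded (a sketch; uses the
+# lx, ly and the row heights g1/g3/g8/g5 defined above; note that in the original,
+# map 8 is nudged 0.038 lower to make room for its bottom legend):
+print.map.ring <- function(maps, xs=c(0,0.25,0.50,0.75), ys=c(g1,g3,g8,g5)){
+  ring <- rbind(cbind(xs, ys[1]),       # top row, left to right
+                cbind(xs[4], ys[2:4]),  # right column, downwards
+                cbind(xs[3:1], ys[4]),  # bottom row, right to left
+                cbind(xs[1], ys[3:2]))  # left column, upwards
+  for(m in 1:12) print(maps[[m]], position=c(ring[m,1], ring[m,2],
+                                             ring[m,1]+lx, ring[m,2]+ly), more=TRUE)
+}
+# usage: print.map.ring(my.map)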
+if(altro==TRUE){#################################### FINAL COMPARISON OF THE RECONSTRUCTIONS #########################################################################
+# note: since using the correlation or the covariance matrix in the PCA turns out to be equivalent, whether or not the data are standardised first,
+# only the model with standardised data and the covariance matrix is included
+# load an R session containing the objects you need:
+dat1=paste(dir_root,"/RData/R_session_v85_10PCs_stand_covar_1994-2003_nojack.RData",sep="")
+dat2=paste(dir_root,"/RData/R_session_v86_10WTs_1994-2003_nojack.RData",sep="")
+#dat1=paste(dir_root,"/RData/R_session_v88_10PCs_1948-1957_nojack.RData",sep="")
+#dat2=paste(dir_root,"/RData/R_session_v89_10WTs_1948-1957_nojack.RData",sep="")
+
+envir1<-new.env()
+load(file=dat1, env=envir1) # load all the objects of session dat1 into memory, inside environment envir1
+envir2<-new.env()
+load(file=dat2, env=envir2)
+#ls(env=envir1)
+#envir1$CV.valid
+
+# final plots with the comparison:
+pdf(paste(dir_root,"/mapas/Comparison_2.pdf",sep=""),width=10) # other formats available to save the plot: pdf, bmp, jpeg, tiff.
+y.min<-0.1
+y.max<-0.9
+
+for(graf in 1:4){
+par(mfrow=graph.yx);par(oma=c(1,1,3,1)); par(mar=c(2,2,2,2)) # oma controls the outer margin around all plots, mar the inner margins between plots
+for(mese in int.mesi){
+ind<-ifelse(graf<=2,"CV","COR")
+
+if(graf==1){curva1<-envir1$CV.valid.mean[cla,mese,];curva2<-envir2$CV.valid.mean[cla,mese,]}
+if(graf==2){curva1<-envir1$CV.calib.mean[cla,mese,];curva2<-envir2$CV.calib.mean[cla,mese,]}
+if(graf==3){curva1<-envir1$COR.valid.mean[cla,mese,];curva2<-envir2$COR.valid.mean[cla,mese,]}
+if(graf==4){curva1<-envir1$COR.calib.mean[cla,mese,];curva2<-envir2$COR.calib.mean[cla,mese,]}
+
+print(plot(rep(0,predmax[mese]),type="n",xlab="Num.of predictors used", ylab=ind,ylim=c(y.min,y.max),yaxp=c(y.min,y.max,(y.max-y.min)*10),xaxp=c(0,predmax[mese],predmax[mese]))) # draws an empty plot with just the axes
+print(lines(curva1,type="o",col=col.metodo[1],cex=.6))
+print(lines(curva2,type="o",col=col.metodo[2],cex=.6))
+title(nome.mese[mese],line=0.5,cex=1.1)
+
+leg1<-paste(ind,"with PCs")
+leg2<-paste(ind,"with WTs")
+
+legend("bottomright",c(leg1,leg2),col=col.metodo,lwd=.8)
+} # closes the for over the months of the final plot
+
+# title of the final plot:
+per<-ifelse((graf==1 | graf==3),"validation","calibration")
+mtext(paste("Mean ",ind," for ",n.staz," stations for ",per," period ",start.valid,"-",end.valid,sep=""),outer=T,cex=1.2)
+} # closes the for over graf
+
+dev.off()
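+# The four if(graf==...) branches above pick one statistic matrix per panel; the
+# same selection can be table-driven (a sketch, using the envir1/envir2 sessions
+# loaded above):
+pick.curve <- function(e, graf, cla, mese){
+  stat.names <- c("CV.valid.mean","CV.calib.mean","COR.valid.mean","COR.calib.mean")
+  e[[stat.names[graf]]][cla, mese, ]
+}
+# usage: curva1 <- pick.curve(envir1, graf, cla, mese); curva2 <- pick.curve(envir2, graf, cla, mese)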
+library(ReadImages)
+img <- read.jpeg("C:/nicola/north.arrow.jpg")
+plot( 1:1000, 1:1000, type="n")
+plot(img,add=T)
+
+###################### MAP of the ratio between the coefficients ############################################
+dat=paste(dir_root,"/RData/R_session_v151_regr_1950-2003_with_old_MOPREDAS.RData",sep="")
+
+envir<-new.env()
+load(file=dat, env=envir) # load all the objects of session dat into memory, inside environment envir
+
+diff.coeff.WT.abs.data<-coeff.WT.abs.data
+for(wt in 1:nWT[cla.rif]){
+  # maps of the ratio between the absolute regression coefficients of each WT (= absolute intensity of each WT, i.e. the absolute contribution in mm to total precipitation of one day belonging to that WT)
+  my.main<-paste("(1+Prec.Intensity 1915-1949) / (1+Prec.Intensity 1950-2003) for ",WTs.names[[1]][wt],"Weather Type")
+  #my.cuts<-c(-1,-0.2,-0.05,-0.01,0.01,0.05,0.2,1)  # alternative scales tried earlier
+  #my.cuts<-c(-100,-20,-5,-1,1,5,20,100)
+  my.cuts<-c(0,0.1,0.5,0.8,1.25,2,10,1000)
+  my.colors<-rev(jet.colors5(1000)) #c("#00FFFF","#0099F8","blue","purple","magenta4","black")
+  for(m in 1:12)diff.coeff.WT.abs.data[[wt]][[4+m]]<-((coeff.WT.abs.data[[wt]][[4+m]]+1)/(envir$coeff.WT.abs.data[[wt]][[4+m]]+1)) #/(media.prec.obs[,int.mesi[m]]) #/lastday(m,1999))
+  if(interpol=="NO"){
+   my.map<-spplot(diff.coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+      cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T)
+  } else {
+   my.map<-spplot(diff.coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+      at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,legendEntries=my.labels)
+  }
+  plot(my.map)
+
+  # maps of the absolute regression coefficients of each WT for 1950-2003 (= absolute intensity of each WT, i.e. the absolute contribution in mm to total precipitation of one day belonging to that WT)
+  # (the spatial distribution is the same as in the absolute-contribution maps, but here it is not multiplied by the monthly WT frequency)
+  windows()
+  my.main<-paste("Absolute contribution of 1 day of predictor",WTs.names[[1]][wt],"during 1950-2003")
+  my.cuts<-c(0,0.01,1,10,1000) #my.labels<-c("0 mm","<1 mm","1-10 mm","11-20 mm")
+  my.colors<-rev(jet.colors5(1000))
+  if(interpol=="NO"){
+   my.map<-spplot(envir$coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+      cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T)
+  } else {
+   my.map<-spplot(envir$coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+      at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,legendEntries=my.labels)
+  }
+  plot(my.map)
+
+  # same maps of the absolute regression coefficients of each WT, for the period 1915-1949:
+  windows()
+  my.main<-paste("Absolute contribution of 1 day of predictor",WTs.names[[1]][wt],"during 1915-1949")
+  my.cuts<-c(0,0.01,1,10,1000)
+  my.colors<-rev(jet.colors5(1000))
+  if(interpol=="NO"){
+   my.map<-spplot(coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+      cuts=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T)
+  } else {
+   my.map<-spplot(coeff.WT.abs.data[[wt]],nome.mese.short[c(int.mesi)],layout=c(3,4),par.settings = standard.theme(color = FALSE),key.space = "right",
+      at=my.cuts,col.regions=my.colors,main=my.main,sp.layout=my.layout,cex=0.5,as.table=T,legendEntries=my.labels)
+  }
+  plot(my.map)
+
+}
+
+
+
+################## compute the mean pressure maps associated with a teleconnection index in its positive or negative phase ####################
+
+year.start=1850 #1946 # must begin with, or after, the first year of available daily pressure data
+year.end=2003
+n.lat=10 # number of rows in each raster map (distinct latitude values)
+n.lon=25 # number of columns in each raster map (distinct longitude values)
+deg=5 # grid resolution in degrees
+angolo.basso.sinistra=c(-70, 25) # coordinates of the lower-left corner of each input pressure raster
+
+st.dev=TRUE # if FALSE, averages over the n.max.min most extreme months instead
+st.dev.pos=1 # threshold used to pick the months whose index is above it (positive phase)
+st.dev.neg=-1 # threshold for the negative phase
+n.max.min= 6 # number of months with the highest/lowest index over which the day-by-day mean pressure fields are computed (alternative criterion to st.dev.pos and st.dev.neg)
+
+#datos.tel<-read.table("C:/nicola/pressure/teleconnessiones/2) INDICES_TELECO_2009_correlaciones.txt",header=TRUE,stringsAsFactors=FALSE,na.strings="-999")
+datos.tel<-read.table("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/12) NAO_MOI_WeMOI_para_R_metodo_Michele.txt",header=TRUE,stringsAsFactors=FALSE,na.strings="-999")
+#datos.SLP<-read.table("C:/nicola/pressure/database_emulate/3) database_in_txt.txt",header=FALSE,stringsAsFactors=FALSE,na.strings="-999") # daily pressure database in the format year/month/day/pixel pressure values in columns
+datos.SLP<-read.table("C:/nicola/pressure/database_emulate/2) emulate_normalizado_1850-2003.txt",header=FALSE,stringsAsFactors=FALSE,na.strings="-999") # daily pressure database in the format year/month/day/pixel pressure values in columns
+nomi.indici<-unique(datos.tel[[1]]) # list of all the input teleconnection indices (AO, NAO, etc.)
+n.indici<-length(nomi.indici) # number of distinct indices (AO, NAO, ...)
+n.years<-year.end-year.start+1
+
+nome.mese<-c("January","February","March","April","May","June","July","August","September","October","November","December")
+nome.mese.short<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+jet.colors <-colorRampPalette(c("violet","blue", "#007FFF", "cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
+
+# compute the maps associated with all the months whose index is above/below a fixed threshold:
+promedio4<-c()
+for(i in nomi.indici){
+  promedio3<-c()
+  for(m in 1:12){ # month
+    tele<-subset(datos.tel,datos.tel[[1]]==i & datos.tel[[2]]>=year.start & datos.tel[[2]]<=year.end,select=c(1,2,2+m))
+    promedio2<-c()
+    for(x in 1:2){ # first average over the months with the lowest index values, then over those with the highest:
+      mod<-ifelse(x==1,FALSE,TRUE)
+      suffix<-ifelse(x==1,"Neg","Pos")
+      tele2<-tele[order(tele[[3]],decreasing=mod),]
+      daily.pression<-c()
+
+      # loop over each of the months entering the average:
+      n.mesi<-ifelse(suffix=="Neg",max(1,which(tele2[[3]]<st.dev.neg)),max(1,which(tele2[[3]]>st.dev.pos))) # (if suffix=="Neg" the data are sorted from most negative to most positive); if no month satisfies the condition, only the first one is taken
+      if(st.dev==FALSE)n.mesi<-n.max.min
+
+      for(y in 1:n.mesi){
+        temp.pression<-subset(datos.SLP,datos.SLP[[1]]==tele2[[2]][y] & datos.SLP[[2]]==m)
+        daily.pression<-rbind(daily.pression,temp.pression)
+      }
+      n.fila<-dim(daily.pression)[1]
+      n.days<-n.fila/n.lat # each raster has n.lat rows, so dividing the total row count by n.lat gives the number of raster maps, i.e. the number of distinct days being averaged
+      row.index<-seq(0,n.fila-n.lat,n.lat)
+
+      promedio<-c()
+      for(k in 1:n.lat){
+        temp<-round(colMeans(daily.pression[row.index+k,]),1)
+        promedio<-rbind(promedio,temp)
+      }
+      promedio<-cbind(i,m,suffix,promedio[,c(4:(n.lon+3))])
+      promedio2<-rbind(promedio2,promedio)
+    } # closes the for over x
+    promedio3<-rbind(promedio3,promedio2)
+  }
+  promedio4<-rbind(promedio4,promedio3)
+}
+
+# write.table(promedio4,"C:/nicola/pressure/promedio.txt",quote=FALSE,row.names=FALSE)
+WorldPoly<- readShapePoly('C:/Projecto_Master/Limites Mundo/TM_WORLD_BORDERS-0.2.shp')
+proj4string(WorldPoly) <- CRS("+proj=longlat")
+my.polygon=list('sp.polygons',WorldPoly,first=F);my.layout=list(my.polygon)
+
+gt = GridTopology(cellcentre.offset = angolo.basso.sinistra, cellsize = c(deg,deg), cells.dim = c(n.lon, n.lat))
+grd = SpatialGrid(gt)
+proj4string(grd) <- CRS("+proj=longlat")
+pressure.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.lon*n.lat)), proj4string = CRS("+proj=longlat +datum=WGS84"))
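+# promedio4 stacks blocks of n.lat rows in the order: index, then month, then phase
+# (Neg first, then Pos). A helper that makes the row arithmetic used in the loading
+# loops below explicit (a sketch; reproduces the same subscript expression):
+block.rows <- function(i, k, mese)
+  (1:n.lat) + (mese-1)*n.lat*2 + ifelse(k=="Neg",0,n.lat) +
+    (which(nomi.indici==i)-1)*n.lat*24
+# usage: promedio4[block.rows("NAO","Pos",1), 4:(3+n.lon)]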
CRS("+proj=longlat +datum=WGS84")) + +# carica i dati di tutte le medie possibili in pressure.grid: +for(i in nomi.indici){for(k in c("Neg","Pos")){for(mese in 1:12)pressure.grid@data[[paste(i,k,nome.mese[mese],sep="_")]]=as.numeric(t(promedio4[(1:n.lat)+(mese-1)*n.lat*2+ifelse(k=="Neg",0,n.lat)+(which(nomi.indici==i)-1)*n.lat*24,4:(3+n.lon)]))}} + +# visualizza una mappa a scelta: +my.indice="NAO";my.fase="Pos" +spplot(pressure.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70)) + +#salva tutte le mappe per ogni teleconnessione: +for(my.indice in nomi.indici){ + for(my.fase in c("Neg","Pos")){ + png(paste("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/indice_",my.indice,"_",my.fase,".png",sep=""),width=1440,height=1200) # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. + print(spplot(pressure.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70))) + dev.off() + } +} + +# calcola le mappe promedie associate a tutti i mesi senza nessun umbral: (= a climatologia su tutto il periodo) +promedio.all4<-c() +for(i in nomi.indici){ + promedio.all3<-c() + for(m in 1:12){ # mese + tele<-subset(datos.tel,datos.tel[[1]]==i & datos.tel[[2]]>=year.start & datos.tel[[2]]<=year.end,select=c(1,2,2+m)) + promedio.all2<-c() + for(x in 1:2){ # in questo caso il promedio per suffix="Neg" e' lo stesso per suffix="Pos" perche' si considerano tutti i mesi + mod<-ifelse(x==1,FALSE,TRUE) + suffix<-ifelse(x==1,"Neg","Pos") + tele2<-tele[order(tele[[3]],decreasing=mod),] + daily.pression<-c() + + # cicla su tutti i mesi: + for(y in 1:n.years){ + temp.pression<-subset(datos.SLP,datos.SLP[[1]]==tele2[[2]][y] & datos.SLP[[2]]==m) + daily.pression<-rbind(daily.pression,temp.pression) + } + n.fila<-dim(daily.pression)[1] + n.days<-n.fila/n.lat # ci sono 10 file per ogni raster, percio' se dividiamo per 10 il numero di file totali troviamo il numero di mappe raster ovvero il numero di giorni diversi su cui calcolare il promedio + row.index<-seq(0,n.fila-n.lat,n.lat) + + promedio.all<-c() + for(k in 1:n.lat){ + temp<-round(colMeans(daily.pression[row.index+k,]),1) + promedio.all<-rbind(promedio.all,temp) + } + promedio.all<-cbind(i,m,suffix,promedio.all[,c(4:(n.lon+3))]) + promedio.all2<-rbind(promedio.all2,promedio.all) + } # chiude il for su x + promedio.all3<-rbind(promedio.all3,promedio.all2) + } + promedio.all4<-rbind(promedio.all4,promedio.all3) +} + +# carica i dati di tutte le medie possibili in pressure.all.grid: +pressure.all.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.lon*n.lat)), proj4string = CRS("+proj=longlat +datum=WGS84")) +for(i in nomi.indici){for(k in c("Neg","Pos")){for(mese in 1:12)pressure.all.grid@data[[paste(i,k,nome.mese[mese],sep="_")]]=as.numeric(t(promedio.all4[(1:n.lat)+(mese-1)*n.lat*2+ifelse(k=="Neg",0,n.lat)+(which(nomi.indici==i)-1)*n.lat*24,4:(3+n.lon)]))}} + +# visualizza una mappa della pressione "climatologica" a scelta: (non dipende ne dall'indice ne dalla fase ma solo dal mese) +my.indice="NAO";my.fase="Pos" +spplot(pressure.all.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,names.attr=nome.mese,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70)) + +# salva eventualmente le 12 mappe delle "climatologie" di pressione per ogni teleconnessione: 
+png(paste("C:/nicola/promedio_mensual_Emulate.png",sep=""),width=1440,height=1200) # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. +print(spplot(pressure.all.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,names.attr=nome.mese,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70))) +dev.off() + +# salva pure i 12 grid mensili della climatologia mensuale dei campi di pressione: +for(mese in 1:12)writeAsciiGrid(pressure.all.grid,paste("C:/nicola/promedio_EMULATE_",nome.mese[mese],".txt",sep=""),attr=paste("NAO_Neg_",nome.mese[mese],sep=""),na.value=-999) + +# calcola la differenza tra le mappe con umbral e le mappe senza umbral (anomalia): +pressure.anom.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.lon*n.lat)), proj4string = CRS("+proj=longlat +datum=WGS84")) +for(i in nomi.indici){for(k in c("Neg","Pos")){for(mese in 1:12)pressure.anom.grid@data[[paste(i,k,nome.mese[mese],sep="_")]]=pressure.grid@data[[paste(i,k,nome.mese[mese],sep="_")]]-pressure.all.grid@data[[paste(i,k,nome.mese[mese],sep="_")]]}} + +# visualizza una mappa delle ANOMALIE a scelta: +my.indice="WEMOI";my.fase="Pos" +spplot(pressure.anom.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70)) + +# salva tutte le mappe delle ANOMALIE per ogni teleconnessione: +for(my.indice in nomi.indici){ + for(my.fase in c("Neg","Pos")){ + png(paste("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/mapas anomalias/indice_",my.indice,"_",my.fase,".png",sep=""),width=1440,height=1200) # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. + print(spplot(pressure.anom.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70))) + dev.off() + } +} + +#save.image("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/R_metodo_NO_Michele.RData") +save.image("C:/nicola/pressure/teleconnessiones/promedio_mensuale_ENSEMBLE_1850_2003.RData") + +######################################################################################################################################### +# calcola le stesse mappe ma con il metodo alternativo di Michele: prima calcola le anomalie di pressione e poi la selezione dei 10 mesi: +year.start=1946 # deve cominciare assieme o dopo il primo anno di pressioni giornaliere disponibili +year.end=2003 # deve finire assieme o prima dell'ultimo anno di press.giorn.disponibili (nel caso dell'EMULATE il 2003!!!) 
+n.lat=10 # number of rows in each raster map (distinct latitude values)
+n.lon=25 # number of columns in each raster map (distinct longitude values)
+deg=5 # grid resolution in degrees
+angolo.basso.sinistra=c(-70, 25) # coordinates of the lower-left corner of each input pressure raster
+
+n.max.min= 10 # number of months with the highest/lowest index over which the day-by-day mean pressure fields are computed (alternative criterion to st.dev.pos and st.dev.neg)
+
+#datos.tel<-read.table("C:/nicola/pressure/teleconnessiones/2) INDICES_TELECO_2009_correlaciones.txt",header=TRUE,stringsAsFactors=FALSE,na.strings="-999")
+datos.tel<-read.table("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/12) NAO_MOI_WeMOI_para_R_metodo_Michele.txt",header=TRUE,stringsAsFactors=FALSE,na.strings="-999")
+datos.SLP<-read.table("C:/nicola/pressure/database_emulate/2) emulate_normalizado_1850-2003.txt",header=FALSE,stringsAsFactors=FALSE,na.strings="-999") # daily pressure database in the format year/month/day/pixel pressure values in columns
+
+n.years<-year.end-year.start+1
+nomi.indici<-unique(datos.tel[[1]]) # list of all the input teleconnection indices (AO, NAO, etc.)
+n.indici<-length(nomi.indici) # number of distinct indices (AO, NAO, ...)
+
+#####
+
+nome.mese<-c("January","February","March","April","May","June","July","August","September","October","November","December")
+nome.mese.short<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+jet.colors <-colorRampPalette(c("violet","blue", "#007FFF", "cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
+jet.colors3 <-colorRampPalette(c("blue","cyan","yellow", "#FF7F00", "red"))
+jet.colors12<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan","#7FFF7F", "yellow", "white","#FF7F00", "red", "#7F0000","sienna","grey66","grey30","grey10"))
+jet.colors13<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan", "white"))
+jet.colors14<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan","#7FFF7F", "white","yellow","#FF7F00", "red", "#7F0000","sienna","grey66"))
+jet.colors15<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan", "gold","white"))
+jet.colors16<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan", "#7FFF7F","white"))
+jet.colors17<-colorRampPalette(c("lightpink","mediumorchid4","purple","blue", "#007FFF", "cyan", "#7FFF7F","white"))
+jet.colors18<-colorRampPalette(c("blue","cyan","#7FFF7F","pink","white","yellow","orange","red","darkred"))
+jet.colors19<-colorRampPalette(c("gray6","saddlebrown","salmon","pink","white","yellow","orange","red","darkred"))
+jet.colors20<-colorRampPalette(c("violetred","blue", "#007FFF", "cyan","#7FFF7F","white","yellow","orange","salmon","red","gray5"))
+jet.colors21<-colorRampPalette(c("white","yellow","orange","salmon","red","violetred","blue","#007FFF","cyan","#7FFF7F"))
+
+
+# drop 29 February from the data, otherwise the daily anomalies cannot be computed:
+datos.SLP.sin29<-subset(datos.SLP,datos.SLP[,2]!=2 | datos.SLP[,3]!=29)
+
+first.year.SLP<-datos.SLP.sin29[1,1]
+diff.years<-year.start-first.year.SLP # number of years between the first year of pressure data and the start of the calculations
+
+# for every day (except 29 Feb) and pixel, compute its mean over the whole period, needed later to build the anomaly:
+datos.SLP.climatol<-array(NA,c(365,n.lat,n.lon),dimnames=list(day=NULL,lat=NULL,lon=NULL))
+for(day in 1:365){
+  for(i in 1:n.lat){
+    for(j in 1:n.lon){
+      datos.SLP.climatol[day,i,j]<-mean(datos.SLP.sin29[diff.years*365*n.lat+seq(0,(n.years-1)*365*n.lat,365*n.lat)+(day-1)*n.lat+i,3+j],na.rm=T)
+    }
+  }
+}
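+# The triple loop above rebuilds the same day-by-pixel climatology one cell at a
+# time. A vectorised equivalent (a sketch; assumes the years year.start:year.end
+# are complete and the rows cycle latitude-within-day-within-year, as above):
+slp.arr <- as.matrix(datos.SLP.sin29[datos.SLP.sin29[,1]>=year.start &
+                                     datos.SLP.sin29[,1]<=year.end, 4:(3+n.lon)])
+dim(slp.arr) <- c(n.lat, 365, n.years, n.lon)    # lat varies fastest, then day, then year
+clim.arr <- apply(slp.arr, c(2,1,4), mean, na.rm=TRUE) # clim.arr[day,lat,lon] should match datos.SLP.climatol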
+datos.SLP.climatol2<-c() # stack the 365 daily climatology blocks so they match the row layout of datos.SLP.sin29:
+for(d in 1:365)datos.SLP.climatol2<-rbind(datos.SLP.climatol2,datos.SLP.climatol[d,,])
+
+last.year.SLP<-datos.SLP.sin29[dim(datos.SLP.sin29)[1],1]
+tot.years<-last.year.SLP-first.year.SLP+1
+datos.SLP.climatol3<-matrix(datos.SLP.climatol2,365*n.lat*tot.years,n.lon,byrow=T) # recycle the one-year climatology over all tot.years years
+
+# subtract the two matrices directly to obtain the anomaly:
+datos.SLP.anom<-datos.SLP.sin29[,4:(3+n.lon)]-datos.SLP.climatol3
+
+# re-attach the first three columns (year, month, day) on the left:
+datos.SLP.anom<-cbind(datos.SLP.sin29[,1:3],datos.SLP.anom)
+
+promedio4.ano<-c()
+for(i in nomi.indici){
+  promedio3.ano<-c()
+  for(m in 1:12){ # month
+    # extract the list of monthly values of the chosen index:
+    tele<-subset(datos.tel,datos.tel[[1]]==i & datos.tel[[2]]>=year.start & datos.tel[[2]]<=year.end,select=c(1,2,2+m))
+    promedio2.ano<-c()
+    for(x in 1:2){ # first average over the months with the lowest index values, then over those with the highest:
+      mod<-ifelse(x==1,FALSE,TRUE)
+      suffix<-ifelse(x==1,"min","max")
+      tele2<-tele[order(tele[[3]],decreasing=mod),] # like tele, but sorted in ascending or descending order
+
+      # loop over each of the n.max.min years entering the average:
+      daily.pression<-c()
+      for(y in 1:n.max.min){
+        temp.pression<-subset(datos.SLP.anom,datos.SLP.anom[[1]]==tele2[[2]][y] & datos.SLP.anom[[2]]==m)
+        daily.pression<-rbind(daily.pression,temp.pression)
+      }
+      n.fila<-dim(daily.pression)[1]
+      n.days<-n.fila/n.lat # each raster has n.lat rows, so dividing the total row count by n.lat gives the number of raster maps, i.e. the number of distinct days being averaged
+      row.index<-seq(0,n.fila-n.lat,n.lat)
+
+      promedio.ano<-c()
+      for(k in 1:n.lat){
+        temp<-round(colMeans(daily.pression[row.index+k,]),1)
+        promedio.ano<-rbind(promedio.ano,temp)
+      }
+      promedio.ano<-cbind(i,m,suffix,promedio.ano[,c(4:(n.lon+3))])
+      promedio2.ano<-rbind(promedio2.ano,promedio.ano)
+    } # closes the for over x
+    promedio3.ano<-rbind(promedio3.ano,promedio2.ano)
+  }
+  promedio4.ano<-rbind(promedio4.ano,promedio3.ano)
+}
+
+WorldPoly<- readShapePoly('C:/Projecto_Master/Limites Mundo/TM_WORLD_BORDERS-0.2.shp')
+proj4string(WorldPoly) <- CRS("+proj=longlat")
+my.polygon=list('sp.polygons',WorldPoly,first=F);my.layout=list(my.polygon)
+
+gt = GridTopology(cellcentre.offset = angolo.basso.sinistra, cellsize = c(deg,deg), cells.dim = c(n.lon, n.lat))
+grd = SpatialGrid(gt)
+proj4string(grd) <- CRS("+proj=longlat")
+pressure.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.lon*n.lat)), proj4string = CRS("+proj=longlat +datum=WGS84"))
+
+# load all the possible averages into pressure.mic.grid:
+pressure.mic.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.lon*n.lat)), proj4string = CRS("+proj=longlat +datum=WGS84"))
+for(i in nomi.indici){for(k in c("Neg","Pos")){for(mese in 1:12)pressure.mic.grid@data[[paste(i,k,nome.mese[mese],sep="_")]]=as.numeric(t(promedio4.ano[(1:n.lat)+(mese-1)*n.lat*2+ifelse(k=="Neg",0,n.lat)+(which(nomi.indici==i)-1)*n.lat*24,4:(3+n.lon)]))}}
+# display one of Michele's ANOMALY maps, to check that everything is right:
+my.indice="NAO";my.fase="Pos"
+spplot(pressure.mic.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70))
+
+# save all of Michele's ANOMALY maps for each teleconnection:
+for(my.indice in nomi.indici){
+  for(my.fase in c("Neg","Pos")){
+    png(paste("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/mapas anomalias Michele/indice_",my.indice,"_",my.fase,".png",sep=""),width=1200,height=1000) # other formats available to save the plot: pdf, bmp, jpeg, tiff.
+    print(spplot(pressure.mic.grid,paste(my.indice,my.fase,nome.mese,sep="_"),as.table=T,col.regions=rev(jet.colors(1000)),sp.layout=my.layout,contour=TRUE,col='white',xlim=c(-70,50),ylim=c(25,70)))
+    dev.off()
+  }
+}
+
+
+
+#################################################################################################################################################
+# old study of the relation between WTs and teleconnections (Iberian Peninsula only):
+nomi.WTs<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.conversion<-c(1,2,3,4,5,6,7,8,30,31.5,32.5,33.5,34.5,35.5,36.5,37.5,38.5,40,41.5,42.5,43.5,44.5,45.5,46.5,47.5,48.5)
+segni<-c("Neg","Pos")
+
+idWTs<-sort(unique(datos.WTs[[4]])) # list of the numeric codes corresponding to the WTs
+nWTs<-length(idWTs)
+
+linked.WTs<-array(NA,c(n.indici,2,12,nWTs),dimnames=list(nomi.indici,segni,nome.mese,nomi.WTs))
+linked.years<-array(NA,c(n.indici,2,12,n.max.min),dimnames=list(nomi.indici,segni,nome.mese,c(1:n.max.min)))
+
+for(i in nomi.indici){
+  for(k in segni){
+    for(mese in 1:12){
+      tele<-subset(datos.tel,datos.tel[[1]]==i & datos.tel[[2]]>=year.start & datos.tel[[2]]<=year.end,select=c(1,2,2+mese))
+      mod<-ifelse(k=="Neg",FALSE,TRUE)
+      tele2<-tele[order(tele[[3]],decreasing=mod),]
+
+      daily.WTs<-c()
+      for(y in 1:n.max.min){
+        temp.WTs<-subset(datos.WTs,datos.WTs[[1]]==tele2[[2]][y] & datos.WTs[[2]]==mese)
+        daily.WTs<-rbind(daily.WTs,temp.WTs)
+        linked.years[which(i==nomi.indici),which(k==segni),mese,y]<-tele2[[2]][y]
+      }
+
+      fr.WTs<-table(daily.WTs[[4]]) # frequency distribution of each WT (number of days over the whole period)
+      #n.fr.WTs<-length(fr.WTs)
+      #second.max<-sort(fr.WTs)[n.fr.WTs-1]
+      #temp<-names(fr.WTs)[min(which(fr.WTs==second.max))] # selects the WT with the highest frequency
+      #linked.WTs[which(i==nomi.indici),which(k==segni),mese,]<-WTs.names[which(temp==WTs.conversion)]
+      for(wt in WTs.conversion){ # add the WT codes that never occur, with frequency 0:
+        if(length(which(names(fr.WTs)==wt))==0){fr.WTs<-c(fr.WTs,0);names(fr.WTs)[length(fr.WTs)]=wt}
+      }
+      x<-sort(as.numeric(names(fr.WTs)))
+      y<-match(names(fr.WTs),x)
+      z<-c()
+      z[y]<-fr.WTs
+      names(z)<-x
+      linked.WTs[which(i==nomi.indici),which(k==segni),mese,]<-z
+    }
+  }
+}
+
+ftable(linked.WTs)
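+# The padding and reordering of fr.WTs above just tabulates daily.WTs over the
+# fixed, ordered set of WT codes; factor() with explicit levels does it in one
+# call (a sketch):
+count.WTs <- function(codes) table(factor(codes, levels = WTs.conversion))
+# usage: linked.WTs[which(i==nomi.indici),which(k==segni),mese,] <- count.WTs(daily.WTs[[4]])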
+# look for 2 or more series sharing the same coordinates; if found, shift them one metre further north so that kriging remains possible:
+ lonlat<-paste(coordinates(infofile)[,1],coordinates(infofile)[,2]) # string with the lon+lat of every station
+ nlonlat<-length(lonlat)
+ for(st in 1:nlonlat){
+   ss<-which(lonlat==lonlat[st]) # all the rows of the table with the same lon and lat as row st (there should be only one)
+   nss<-length(ss)
+   if(nss>1){
+     #print(paste(st,listaz[st],ss[1],ss[2])) # debug line; normally at most 2 stations share the same position
+     for(k in 2:nss)infofile@coords[ss[k],2]=infofile@coords[ss[k],2]+k-1 # shift the latitude k-1 metres north (the first station of the group is left alone)
+   }
+ }
+
+# create a generic km-based grid in UTM30 overlapping the whole Iberian Peninsula:
+gt = GridTopology(cellcentre.offset = c(-70000, 3980000), cellsize = c(1000*pixel.size,1000*pixel.size), cells.dim = c(ceiling(1110/pixel.size), ceiling(890/pixel.size)))
+grd = SpatialGrid(gt)
+n.pixels<-grd@grid@cells.dim[1]*grd@grid@cells.dim[2]
+ proj4string(grd) <- CRS("+proj=utm +zone=30 +datum=WGS84") # +ellps=clrk66")
+#summary(grd);plot(IpPoly);plot(grd,add=T) # displays the grid overlaid on the IP
+
+#linked.years<-array(NA,c(n.indici,2,12,n.max.min),dimnames=list(nomi.indici,segni,nome.mese,c(1:n.max.min)))
+
+promedio<-array(NA,c(n.indici,2,n.staz,12))
+for(i in nomi.indici){for(k in segni){for(mese in 1:12){
+  promedio[which(i==nomi.indici),which(k==segni),,mese]= (subset(dati,dati[[2]]==linked.years[which(i==nomi.indici),which(k==segni),mese,1])[,2+mese]+subset(dati,dati[[2]]==linked.years[which(i==nomi.indici),which(k==segni),mese,2])[,2+mese]+subset(dati,dati[[2]]==linked.years[which(i==nomi.indici),which(k==segni),mese,3])[,2+mese]+subset(dati,dati[[2]]==linked.years[which(i==nomi.indici),which(k==segni),mese,4])[,2+mese]+subset(dati,dati[[2]]==linked.years[which(i==nomi.indici),which(k==segni),mese,5])[,2+mese])/5
+}}}
+
+# interpolate each year (i.e. build one raster per year), unless interpolation=="CLI", in which case the mean value over the whole period is interpolated
+int.dati<-c()
+
+telecon<-infofile
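+# The five chained subset() calls in the promedio loop above average prec over
+# the 5 linked years; the same mean via sapply/rowMeans (a sketch; assumes the
+# 'dati' monthly-precipitation table used elsewhere in this script):
+mean.linked.years <- function(i, k, mese, n.link=5){
+  yrs <- linked.years[which(i==nomi.indici), which(k==segni), mese, 1:n.link]
+  rowMeans(sapply(yrs, function(yy) subset(dati, dati[[2]]==yy)[, 2+mese]))
+}
+# usage: promedio[which(i==nomi.indici),which(k==segni),,mese] <- mean.linked.years(i,k,mese)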
+# load the monthly WT-frequency values into infofile according to the year considered (or directly the whole climatology if interpol=="CLI"):
+ for(i in nomi.indici){for(k in segni){for(mese in 1:12){
+   telecon[[4+(which(i==nomi.indici)-1)*24+(which(k==segni)-1)*12+mese]]<-promedio[which(i==nomi.indici),which(k==segni),,mese]
+   names(telecon)[4+(which(i==nomi.indici)-1)*24+(which(k==segni)-1)*12+mese]=paste(i,k,nome.mese.short[mese],sep=".")
+ }}}
+
+# create an empty grid (Prec.grid) for the interpolation results, plus another one (void.grid) that always stays empty, used as a template instead of infofile:
+Prec.grid<-void.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.pixels)), proj4string = CRS("+proj=utm +zone=30 +datum=WGS84"))
+
+# interpolate every month of the year (even if the regression does not use all months),
+# store the result in Prec.grid and round the predicted precipitation to one decimal, so it takes little space if later saved to a .txt (the rounding error is negligible):
+#formul<-c(Jan~1,Feb~1,Mar~1,Apr~1,May~1,Jun~1,Jul~1,Aug~1,Sep~1,Oct~1,Nov~1,Dec~1)
+for(i in nomi.indici){for(k in segni){for(mese in 1:12){
+  v <- variogram(as.formula(paste(paste(i,k,nome.mese.short[mese],sep=".")," ~ 1",sep="")), telecon)
+  #plot(v,type='b') # shows the points of the semivariogram; type='b' also draws a broken line between them, which is not the fit
+  # fit.variogram finds all the parameters of the fit curve automatically; it only needs rough initial values!!!
+  v.fit <- fit.variogram(v, model=vgm(psill=8000, model='Gau', range=200000, nugget=500))
+  #v.fit # to inspect the fitted parameters
+  #plot(v,v.fit) # to see the semivariogram plus the fitted curve
+  #b <- mean(DistData[[i]][[j+1]]) # only for simple kriging
+  Prec.grid@data[[(which(i==nomi.indici)-1)*24+(which(k==segni)-1)*12+mese]]<-round(krige(formula=as.formula(paste(paste(i,k,nome.mese.short[mese],sep=".")," ~ 1",sep="")), locations=telecon, newdata=grd, model=v.fit,nmax=int.nmax)@data$var1.pred,1)
+  # if kriging misbehaves, check whether the fitted curve is bad; a shorter fit range may help.
+  names(Prec.grid@data)[(which(i==nomi.indici)-1)*24+(which(k==segni)-1)*12+mese]=paste(i,k,nome.mese.short[mese],sep=".")
+}}}
+
+# select the sea pixels with overlay() and set them to NA so they are not drawn in the final map:
+sea.pixels<-which(is.na(overlay(grd,IpPoly))==T)
+land.pixels<-(1:n.pixels)[-sea.pixels]
+for(i in nomi.indici){for(k in segni){for(mese in 1:12)Prec.grid@data[[(which(i==nomi.indici)-1)*24+(which(k==segni)-1)*12+mese]][sea.pixels]=NA}}
+
+my.points.name="Monthly Rainfall Distribution"
+my.prec.levels<- c(seq(0,50,1),seq(52.5,150,2.5),seq(155,200,5),seq(210,290,20),1000)
+my.prec.levels2<- c(0,1,5,10,20,30,50,70,100,150,200,300,500) # legend identical to Ricardo's
+my.prec.levels3<- c(-100,0,1000) # to check whether any interpolated pixel is below 0
+my.prec.levels4<-my.prec.levels*10
+
+i="SCAN"
+k="Neg"
+
+#pdf(paste("C:/nicola/pressure/teleconnessiones","/",i,"_",k,".pdf",sep=""))
+spplot(Prec.grid,zcol=paste(i,k,nome.mese.short,sep="."),at=my.prec.levels4,col.regions=rev(jet.colors(10000)),sp.layout=my.layout,as.table=T)
+#dev.off()
+
+
+
+########################################################################################################################################
+# Study of the correlations between monthly precipitation and teleconnections: [code 100% independent from the main code]
+# (note that the years over which prec is interpolated, and over which the correlations and the r2 are computed, are those of the input prec database,
+# so the teleconnection database must cover at least the same period as the prec database)
+
+dir_infofile="C:/nicola/precipitaciones/prec_mensuales_Iberia/infofile_Iberia_sin_-999_sin_Ceuta.xls"
+dir_datos="C:/nicola/precipitaciones/prec_mensuales_Iberia/datos_Iberia_sin_-999_sin_Ceuta.txt"
+#dir_telec="C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/9) NAO_MOI_WeMOI_final.txt" # must have a header, a first column with the years and 12 columns for each monthly index, in the order: NAO MOI WEMOI
+#dir_telec="C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/PCA/3) PAC_NAO_MOI_WEMOI_para_R.txt" # same format (it may also start and end in different years than the prec data)
+dir_telec="C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/PCA/6) PAC_NAO_MOI_WEMOI_corregidas_para_R.txt" # same format (it may also start and end in different years than the prec data)
+
+header_datos=TRUE # whether the prec data file (dir_datos) has a header row (TRUE for the MOPREDASP database)
+piu_1=TRUE # TRUE if 1 was added to every monthly prec value in the data file to avoid zeros
+dec_mm=TRUE # TRUE if prec is expressed in tenths of mm, FALSE otherwise
+remove_2006=TRUE # TRUE to drop the year 2006 from the database (MOPREDASP only)
+pixel.size=10 # side length of one pixel, in km
+int.nmax=15 # maximum number of neighbours used by the Ordinary Kriging
+y.start.corr=1946 # first year for the correlations between teleconnection indices and prec (must start with, or after, both the prec and the index data)
+y.end.corr=2005 # last year for the correlations between teleconnection indices and prec (must end with, or before, both the prec and the index data)
+perlexe="C:/Toshiba/Strawberry/perl/bin/perl.exe"
+pythonexe="C:/Python27/python.exe"
+#####
+
+nome.mese<-c("January","February","March","April","May","June","July","August","September","October","November","December")
+nome.mese.short<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+jet.colors3 <-colorRampPalette(c("blue","cyan","yellow", "#FF7F00", "red"))
+jet.colors12<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan","#7FFF7F", "yellow", "white","#FF7F00", "red", "#7F0000","sienna","grey66","grey30","grey10"))
+jet.colors13<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan", "white"))
+jet.colors14<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan","#7FFF7F", "white","yellow","#FF7F00", "red", "#7F0000","sienna","grey66"))
+jet.colors15<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan", "gold","white"))
+jet.colors16<-colorRampPalette(c("mediumorchid4","purple","blue", "#007FFF", "cyan", "#7FFF7F","white"))
+jet.colors17<-colorRampPalette(c("lightpink","mediumorchid4","purple","blue", "#007FFF", "cyan", "#7FFF7F","white"))
+jet.colors18<-colorRampPalette(c("blue","cyan","#7FFF7F","pink","white","yellow","orange","red","darkred"))
+jet.colors19<-colorRampPalette(c("gray6","saddlebrown","salmon","pink","white","yellow","orange","red","darkred"))
+jet.colors20<-colorRampPalette(c("violetred","blue", "#007FFF", "cyan","#7FFF7F","white","yellow","orange","salmon","red","gray5"))
+jet.colors21<-colorRampPalette(c("white","yellow","orange","salmon","red","violetred","blue","#007FFF","cyan","#7FFF7F"))
+
+infofile <- read.xls(dir_infofile,verbose=TRUE,fill=TRUE,header=T,skip=0,na.strings="-999",perl=perlexe,stringsAsFactors=FALSE)
+names(infofile)<-c("CODIGO","NOMBRE","PROVINCIA","LATITUDE","LONGITUDE","ALTITUDE")
+
+# drop any empty rows:
+infofile<-subset(infofile,infofile[[1]]!="")
+
+# strip the leading space from station names that begin with " ":
+ss<-which(substring(infofile[[2]],1,1)==" ")
+for (i in ss)infofile[[2]][i]=substring(infofile[[2]][i],2,nchar(infofile[[2]][i]))
+
+# strip the leading character from station names that begin with a dot ".":
+tt<-which(substring(infofile[[2]],1,1)==".")
+for (i in tt)infofile[[2]][i]=substring(infofile[[2]][i],2,nchar(infofile[[2]][i]))
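+# The two name-trimming loops above can be a single vectorised substitution (a
+# sketch; strips one leading space or dot from every station name, so it should
+# replace the loops rather than run after them):
+# infofile[[2]] <- sub("^[ .]", "", infofile[[2]])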
+# sort by station name, province and year:
+#infofile2<-infofile[order(infofile[[2]],infofile[[3]],infofile[[4]]),]
+
+# import the prec data of all the listed stations and convert the -999 missing values to NA:
+# (no per-station loop is needed this time, because all and only the stations in the list are assumed to sit in the same .txt data file):
+dati<-read.table(dir_datos,header=header_datos,stringsAsFactors=FALSE,na.strings="-999")
+names(dati)<-c("Codigo","Year","Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+
+# PREC FIXES #
+
+# subtract the 1 that was added to every monthly prec value in the database to avoid months equal to zero (works even with NAs):
+if(piu_1==TRUE){for(m in 1:12)dati[[2+m]]<-dati[[2+m]]-1 }
+
+# values equal to -1 (old zeros that should have been set to 1) are put back to zero:
+for(m in 1:12){ss<-which(dati[[2+m]]==-1);dati[[2+m]][ss]=0}
+
+# drop the year 2006 from every series, because in the MOPREDAS database it is always empty:
+if(remove_2006==TRUE)dati<-subset(dati,dati[[2]]!=2006)
+
+# if the data are expressed in tenths of mm, divide the prec by 10:
+if(dec_mm==T)dati[,3:14]=dati[,3:14]/10
+
+listaz<-unique(dati[[1]]);n.staz<-length(listaz)
+
+# turn the December prec column into the 'climatological' December, because the 1951-2000 teleconnection data are only available
+# with the climatological December; this also adds an NA to the first year:
+#tt<-c(NA,dati[,14])
+#dati[,14]<-tt[-length(tt)]
+
+### turn precipitation into monthly anomalies (over all available years) before interpolating it:
+anom.start<-dati[1,2]
+anom.end<-dati[dim(dati)[1],2]
+n.years.anom<-anom.end-anom.start+1
+
+# compute the mean of each month over the whole period:
+#dati.prom<-array(NA,c(n.staz,12))
+#for(s in 1:n.staz)dati.prom[s,]<-colMeans(dati[(1+(s-1)*n.years.anom):(s*n.years.anom),3:14],na.rm=T)
+
+# adapt the shape of the means matrix to the data matrix:
+#dati.prom.full<-array(NA,c(n.staz*n.years.anom,12))
+#for(s in 1:n.staz)dati.prom.full[(1+(s-1)*n.years.anom):(s*n.years.anom),]<-matrix(dati.prom[s,],n.years.anom,12,byrow=T)
+
+# divide the two matrices directly to obtain the anomaly:
+#dati.anom<-dati[,3:14]/dati.prom.full
+
+# re-attach the first two columns (year, month) on the left:
+#dati.anom<-cbind(dati[,1:2],dati.anom)
+
+### interpolate the monthly anomalies:
+
+# graphical parameters:
+my.points.name="Monthly Rainfall Distribution"
+my.prec.levels<- c(seq(0,50,1),seq(52.5,150,2.5),seq(155,200,5),seq(210,290,20),500)
+my.prec.levels2<- c(0,1,5,10,20,30,50,70,100,150,200,300,500) # legend identical to Ricardo's
+my.prec.levels3<- c(-100,0,1000) # to check whether any interpolated pixel is below 0
+
+IpPoly<- readShapePoly('C:/Projecto_Master/Limites Espana/borde_peniberica_sin_islas_sin_proyeccion.shp')
+# plot(IpPoly); summary(IpPoly); IpPoly$bbox
+
+coordinates(infofile) <- c('LONGITUDE','LATITUDE')
+#bbox(infofile) # to read the min and max values of the coordinates
+#plot(infofile,cex=0.3,col="light blue");plot(IpPoly,add=T) # just to see where the stations are
+
+ # project the stations to UTM30:
+ proj4string(infofile) <- CRS("+proj=longlat") # +ellps=clrk66")
+ infofile<-spTransform(infofile, CRS("+proj=utm +zone=30 +datum=WGS84"))
+#summary(my.grid.values) #plot(infofile) # to see the shape of the projected grid!
+# project the Iberian Peninsula outline to UTM30:
+proj4string(IpPoly) <- CRS("+proj=longlat")
+IpPoly<-spTransform(IpPoly, CRS("+proj=utm +zone=30 +datum=WGS84"))
+
+my.points<-list('sp.points',infofile,pch=1,cex=0.2,col="black")
+my.polygon<-list('sp.polygons',IpPoly,fill='transparent',first=F) # first=FALSE tells it NOT to draw the polygon first # other list options: fill=0, lwd=3, lty=2
+my.layout<-list(my.polygon) #,my.points)
+my.layout2<-list(my.polygon,my.points)
+
+# look for 2 or more series sharing the same coordinates; if found, shift them one metre further north so that kriging remains possible:
+# (this probably needs updating: two stations falling in the same pixel are already enough to trigger a 'singular model' warning during the interpolation)
+ lonlat<-paste(coordinates(infofile)[,1],coordinates(infofile)[,2]) # string with the lon+lat of every station
+ nlonlat<-length(lonlat)
+ for(st in 1:nlonlat){
+   ss<-which(lonlat==lonlat[st]) # all the rows of the table with the same lon and lat as row st (a length of 1 means everything is fine)
+   nss<-length(ss)
+   if(nss>1){
+     #print(paste(st,listaz[st],ss[1],ss[2])) # debug line; normally at most 2 stations share the same position
+     for(k in 2:nss)infofile@coords[ss[k],2]=infofile@coords[ss[k],2]+k-1 # shift the latitude k-1 metres north (the first station of the group is left alone)
+   }
+ }
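+# The duplicate-coordinate jitter block above (and its twin earlier in the script)
+# as a reusable helper (a sketch; shifts the 2nd, 3rd, ... station at identical
+# coordinates 1, 2, ... metres north, leaving the first untouched):
+jitter.dup.coords <- function(sp){
+  key <- paste(coordinates(sp)[,1], coordinates(sp)[,2])
+  for (k in which(duplicated(key)))
+    sp@coords[k,2] <- sp@coords[k,2] + sum(key[1:k]==key[k]) - 1
+  sp
+}
+# usage: infofile <- jitter.dup.coords(infofile)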
+# create a generic km-based grid in UTM30 overlapping the whole Iberian Peninsula:
+gt = GridTopology(cellcentre.offset = c(-70000, 3980000), cellsize = c(1000*pixel.size,1000*pixel.size), cells.dim = c(ceiling(1110/pixel.size), ceiling(890/pixel.size)))
+grd = SpatialGrid(gt)
+n.pixels<-grd@grid@cells.dim[1]*grd@grid@cells.dim[2]
+ proj4string(grd) <- CRS("+proj=utm +zone=30 +datum=WGS84") # +ellps=clrk66")
+#summary(grd);plot(IpPoly);plot(grd,add=T) # displays the grid overlaid on the IP
+
+# old grid variables, now bypassed by the general settings (WARNING: no month may contain -999 in any year of the chosen period!!!)
+yearB<-anom.start # first year of the interpolation
+yearE<-anom.end # last year of the interpolation
+all.rows=TRUE # TRUE if the imported prec data have one row for every year between the start and the end year, FALSE otherwise
+
+n.years<-yearE-yearB+1
+period<-yearB:yearE
+
+# drop the years of the monthly prec stations that fall outside the chosen period:
+#dati.anom2<-subset(dati.anom,dati.anom[[2]]>=yearB & dati.anom[[2]]<=yearE)
+dati2<-subset(dati,dati[[2]]>=yearB & dati[[2]]<=yearE)
+
+# interpolate each year (i.e. build one raster per year)
+#int.anom.dati<-c()
+#infofile.anom<-infofile
+int.dati<-c()
+infofile2<-infofile
+
+# load the monthly prec values into infofile2 according to the year considered:
+for(y in yearB:yearE){
+  for(mese in 1:12){
+    st<-1:n.staz
+    infofile2[[4+mese]]<-dati2[((st-1)*n.years+1)+y-yearB,2+mese]
+    names(infofile2)[4+mese]=nome.mese.short[mese]
+  }
+
+  # create an empty grid (Prec.grid) for the interpolation results, plus another one (void.grid) that always stays empty, used as a template instead of infofile:
+  Prec.grid<-void.grid<-SpatialGridDataFrame(grd, as.data.frame(rep(NA,n.pixels)), proj4string = CRS("+proj=utm +zone=30 +datum=WGS84"))
+
+  # interpolate every month of the year (even if the regression does not use all months),
+  # store the result in Prec.grid and round the predicted precipitation to two decimals, so it takes little space if later saved to a .txt (the rounding error is negligible):
+  formul<-c(Jan~1,Feb~1,Mar~1,Apr~1,May~1,Jun~1,Jul~1,Aug~1,Sep~1,Oct~1,Nov~1,Dec~1)
+
+  infofile2.sinNA<-infofile2
+  for(mese in 1:12){
+    # careful: variogram() and krige() unfortunately do not accept NA as input; if you get "Error: dimensions do not match", there is some NA.
+    # so, for the variogram only, NAs are replaced by the monthly mean of the affected series; not strictly correct, but those values are used only to compute the variogram
+    ss<-which(is.na(infofile2[[4+mese]]))
+    if(length(ss)>0){
+      for(s in ss)infofile2.sinNA[[4+mese]][s]<-mean(dati2[((s-1)*n.years+1):(s*n.years),2+mese],na.rm=T)[[1]]
+    }
+    v <- variogram(formul[[mese]], infofile2.sinNA)
+    #plot(v,type='b') # shows the points of the semivariogram; type='b' also draws a broken line between them, which is not the fit
+    # fit.variogram finds all the parameters of the fit curve automatically; it only needs rough initial values!!!
+    v.fit <- fit.variogram(v, model=vgm(psill=8000, model='Gau', range=200000, nugget=500))
+    #v.fit # to inspect the fitted parameters
+    #plot(v,v.fit) # to see the semivariogram plus the fitted curve
+    Prec.grid@data[[mese]]<-round(krige(formula=formul[[mese]], locations=infofile2.sinNA, newdata=grd, model=v.fit,nmax=int.nmax)@data$var1.pred,2)
+    # if kriging misbehaves, check whether the fitted curve is bad; a shorter fit range may help.
+  };names(Prec.grid@data)<-nome.mese.short
+
+  # select the sea pixels with overlay() and set them to NA so they are not drawn in the final map:
+  sea.pixels<-which(is.na(overlay(grd,IpPoly))==T)
+  land.pixels<-(1:n.pixels)[-sea.pixels]
+  for(mese in 1:12)Prec.grid@data[[mese]][sea.pixels]=NA
+
+  # matrix of the form pixel / year / Jan / Feb / ... / Dec that stores the prec data for all the interpolated years:
+  int.dati<-rbind(int.dati,cbind(1:n.pixels,y,Prec.grid@data))
+  my.grid.name=paste("Monthly Precipitation Grid for year ",y,sep="")
+} # closes the for over y
+
+n.land.pixels<-length(land.pixels)
+
+# reorder the int.dati matrix by its first and second columns (pixel and year) so it is in the same order as the dati matrix:
+int.dati<-int.dati[order(int.dati[,1],int.dati[,2]),]
+
+# from here on, compute the correlations with the teleconnection indices:
+
+# import the monthly teleconnection index data (careful: there must be no -999 left!!!):
+#dati.tele<-read.table("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/2) componentes_principales.txt",header=TRUE,stringsAsFactors=FALSE)
+#dati.tele<-read.table("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/7) TELECO_1951-2000_Brunetti para R anno climatologico.txt",header=TRUE,stringsAsFactors=FALSE)
+dati.tele<-read.table(dir_telec,header=TRUE,stringsAsFactors=FALSE)
+
+# trim the years in int.dati and dati.tele so that both start and end within the user-defined correlation period:
+int.dati<-subset(int.dati,int.dati[[2]]>=y.start.corr & int.dati[[2]]<=y.end.corr)
+dati.tele<-subset(dati.tele,dati.tele[[1]]>=y.start.corr & dati.tele[[1]]<=y.end.corr)
+
+n.anni<-y.end.corr-y.start.corr+1
+
+# attach the teleconnection time series on the right, repeated identically for every pixel:
+tel.NAO<-tel.WEMOI<-tel.MOI<-array(NA,c(n.anni,12))
+tel.NAO<-dati.tele[,2:13] # with the PCA input this index becomes PC1, and the other two become PC2 and PC3
+tel.MOI<-dati.tele[,14:25]
+tel.WEMOI<-dati.tele[,26:37]
+
+r.NAO<-r.WEMOI<-r.MOI<-p.NAO<-p.WEMOI<-p.MOI<-r2.NAO<-r2.WEMOI<-r2.MOI<-r2.NAO.WEMOI<-r2.NAO.MOI<-r2.MOI.WEMOI<-r2.NAO.MOI.WEMOI<-array(NA,c(12,n.pixels))
+r2p.NAO<-r2p.MOI<-r2p.WEMOI<-r2p.rel.NAO<-r2p.rel.MOI<-r2p.rel.WEMOI<-r2.solape<-array(NA,c(12,n.pixels))
+
+for(mese in 1:12){for(p in 1:n.pixels){
+  temp<-int.dati[((p-1)*n.anni+1):(n.anni*p),2+mese] # prec time series for the chosen month and pixel
+  if(length(na.omit(temp))>0){
+    r.NAO[mese,p]<-cor(temp,tel.NAO[,mese],method="pearson")
+    r.MOI[mese,p]<-cor(temp,tel.MOI[,mese],method="pearson")
+    r.WEMOI[mese,p]<-cor(temp,tel.WEMOI[,mese],method="pearson")
+    #p.NAO[mese,p]<-cor.test(temp,tel.NAO[,mese],method="pearson")$p.value
+    #p.MOI[mese,p]<-cor.test(temp,tel.MOI[,mese],method="pearson")$p.value
+    #p.WEMOI[mese,p]<-cor.test(temp,tel.WEMOI[,mese],method="pearson")$p.value
+  } else {
+    r.NAO[mese,p]<-r.MOI[mese,p]<-r.WEMOI[mese,p]<-p.NAO[mese,p]<-p.MOI[mese,p]<-p.WEMOI[mese,p]<-NA
+}}}
+
+r2.NAO<-r.NAO^2;r2.WEMOI<-r.WEMOI^2;r2.MOI<-r.MOI^2
+
+# R2 of the multiple regression NAO + WEMOI vs prec:
+for(mese in 1:12){for(p in 1:n.pixels){
+  prec.temp<-int.dati[((p-1)*n.anni+1):(n.anni*p),2+mese]
+  if(length(na.omit(prec.temp))==n.anni){
+    temp<-lm(prec.temp~.,data=as.data.frame(cbind(tel.NAO[,mese],tel.WEMOI[,mese])))
+    r2.NAO.WEMOI[mese,p]<-summary(temp)$r.squared
+}}}
+# R2 of the multiple regression NAO + MOI vs prec:
+for(mese in 1:12){for(p in 1:n.pixels){
+  prec.temp<-int.dati[((p-1)*n.anni+1):(n.anni*p),2+mese]
+  if(length(na.omit(prec.temp))==n.anni){
+    temp<-lm(prec.temp~.,data=as.data.frame(cbind(tel.NAO[,mese],tel.MOI[,mese])))
+    r2.NAO.MOI[mese,p]<-summary(temp)$r.squared
+}}}
+
+# R2 of the multiple regression MOI + WEMOI vs prec:
+for(mese in 1:12){for(p in 1:n.pixels){
+  prec.temp<-int.dati[((p-1)*n.anni+1):(n.anni*p),2+mese]
+  if(length(na.omit(prec.temp))==n.anni){
+    temp<-lm(prec.temp~.,data=as.data.frame(cbind(tel.WEMOI[,mese],tel.MOI[,mese])))
+    r2.MOI.WEMOI[mese,p]<-summary(temp)$r.squared
+}}}
+
+# R2 of the multiple regression NAO + MOI + WEMOI vs prec:
+for(mese in 1:12){for(p in 1:n.pixels){
+  prec.temp<-int.dati[((p-1)*n.anni+1):(n.anni*p),2+mese]
+  if(length(na.omit(prec.temp))==n.anni){
+    temp<-lm(prec.temp~.,data=as.data.frame(cbind(tel.NAO[,mese],tel.MOI[,mese],tel.WEMOI[,mese])))
+    r2.NAO.MOI.WEMOI[mese,p]<-summary(temp)$r.squared
+}}}
+
+# correlations among NAO, MOI and WEMOI, without precipitation:
+cor.NAO.MOI<-cor.NAO.WEMOI<-cor.MOI.WEMOI<-c()
+for(mese in 1:12){
+  cor.NAO.MOI[mese]<-cor(tel.NAO[,mese],tel.MOI[,mese])
+  cor.MOI.WEMOI[mese]<-cor(tel.MOI[,mese],tel.WEMOI[,mese])
+  cor.NAO.WEMOI[mese]<-cor(tel.NAO[,mese],tel.WEMOI[,mese])
+}
+
+# partial correlations among NAO, MOI and WEMOI (identical over the whole Iberian Peninsula):
+cor.p.NAO.MOI<-cor.p.NAO.WEMOI<-cor.p.MOI.WEMOI<-c()
+cor.p.NAO.MOI<-(cor.NAO.MOI-(cor.NAO.WEMOI*cor.MOI.WEMOI))/sqrt((1-cor.NAO.WEMOI^2)*(1-cor.MOI.WEMOI^2))
+cor.p.MOI.WEMOI<-(cor.MOI.WEMOI-(cor.NAO.MOI*cor.NAO.WEMOI))/sqrt((1-cor.NAO.MOI^2)*(1-cor.NAO.WEMOI^2))
+cor.p.NAO.WEMOI<-(cor.NAO.WEMOI-(cor.NAO.MOI*cor.MOI.WEMOI))/sqrt((1-cor.NAO.MOI^2)*(1-cor.MOI.WEMOI^2))
+
+cor.parcial.3.var<-function(x1,x2,x3){ # partial correlation between x1 and x2, controlling for x3
+  cor.12<-cor(x1,x2)
+  cor.13<-cor(x1,x3)
+  cor.23<-cor(x2,x3)
+  cor.12.3<-(cor.12-cor.13*cor.23)/sqrt((1-cor.13^2)*(1-cor.23^2))
+  return(cor.12.3)
+}
+
+cor.parcial.4.var<-function(x1,x2,x3,x4){ # partial correlation between x1 and x2, controlling for x3 and x4
+  cor.12.4<-cor.parcial.3.var(x1,x2,x4)
+  cor.13.4<-cor.parcial.3.var(x1,x3,x4)
+  cor.23.4<-cor.parcial.3.var(x2,x3,x4)
+  cor.12.34<-(cor.12.4-cor.13.4*cor.23.4)/sqrt((1-cor.13.4^2)*(1-cor.23.4^2))
+  return(cor.12.34)
+}
+
+# turn prec into a monthly variable like the others, but one that also depends on the pixel:
+prec<-array(NA,c(n.anni,12,n.pixels))
+for(mese in 1:12){for(p in 1:n.pixels){
+  prec[,mese,p]<-int.dati[((p-1)*n.anni+1):(n.anni*p),2+mese] # prec time series for the chosen month and pixel
+}}
+
+#prova<-cor.parcial.3.var(tel.NAO[,mese],tel.MOI[,mese],tel.WEMOI[,mese])
+#prova2<-cor.parcial.3.var(tel.NAO[,mese],prec[,mese,p],tel.WEMOI[,mese])
+
+cor.NAO.prec..MOI.WEMOI<-cor.MOI.prec..NAO.WEMOI<-cor.WEMOI.prec..NAO.MOI<-array(NA,c(12,n.pixels))
+
+barraP<-winProgressBar("Analyzing data","Please wait...",0,12,0)
+for(mese in 1:12){
+  setWinProgressBar(barraP,mese,label=paste(nome.mese[mese]) )
+  for(p in 1:n.pixels){
+    cor.NAO.prec..MOI.WEMOI[mese,p]<-cor.parcial.4.var(tel.NAO[,mese],prec[,mese,p],tel.MOI[,mese],tel.WEMOI[,mese])
+    cor.MOI.prec..NAO.WEMOI[mese,p]<-cor.parcial.4.var(tel.MOI[,mese],prec[,mese,p],tel.NAO[,mese],tel.WEMOI[,mese])
+    cor.WEMOI.prec..NAO.MOI[mese,p]<-cor.parcial.4.var(tel.WEMOI[,mese],prec[,mese,p],tel.NAO[,mese],tel.MOI[,mese])
+  }
+}
+close(barraP)
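+# The thresholds hard-coded below are the two-tailed critical values of Pearson's
+# r; they can be derived from the t distribution instead of read from a table
+# (a sketch; df = n - 2, minus one more for each variable partialled out):
+r.crit <- function(n.years, alpha, n.partial=0){
+  df <- n.years - 2 - n.partial
+  t  <- qt(1 - alpha/2, df)
+  t / sqrt(df + t^2)
+}
+# r.crit(61, 0.10)              # 0.2126 -> sig.10
+# r.crit(61, 0.05)              # 0.2521 -> sig.5
+# r.crit(61, 0.01)              # 0.3274 -> sig.1
+# r.crit(61, 0.10, n.partial=2) # 0.2162 -> sig.p.10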
+# Jorge's color scheme:
+sig.NAO<-r.NAO;sig.MOI<-r.MOI;sig.WEMOI<-r.WEMOI
+neg.1=-0.3 # dark red: negative correlation, significant below the 0.01 (1%) level with a two-tailed test
+neg.10=-0.2 # light red: negative correlation, significant below the 0.10 (10%) level
+pos.10=0.4 # light blue: positive correlation, significant below the 10% level
+pos.1=0.6 # blue: positive correlation, significant below the 1% level
+no.sig=0.1 # yellow: correlation not significant at the 10% level
+
+sig.NAO[r.NAO <= -sig.10]<- neg.10
+sig.NAO[r.NAO >= sig.10]<- pos.10
+sig.NAO[r.NAO > -sig.10 & r.NAO < sig.10]<- no.sig
+sig.NAO[r.NAO <= -sig.1]<- neg.1
+sig.NAO[r.NAO >= sig.1]<- pos.1
+
+sig.MOI[r.MOI <= -sig.10]<- neg.10
+sig.MOI[r.MOI >= sig.10]<- pos.10
+sig.MOI[r.MOI > -sig.10 & r.MOI < sig.10]<- no.sig
+sig.MOI[r.MOI <= -sig.1]<- neg.1
+sig.MOI[r.MOI >= sig.1]<- pos.1
+
+sig.WEMOI[r.WEMOI <= -sig.10]<- neg.10
+sig.WEMOI[r.WEMOI >= sig.10]<- pos.10
+sig.WEMOI[r.WEMOI > -sig.10 & r.WEMOI < sig.10]<- no.sig
+sig.WEMOI[r.WEMOI <= -sig.1]<- neg.1
+sig.WEMOI[r.WEMOI >= sig.1]<- pos.1
+
+# keep only the correlations significant at the 10% level:
+r.sig.NAO<-r.NAO;r.sig.NAO[abs(r.NAO)<sig.10]<-NA
+r.sig.MOI<-r.MOI;r.sig.MOI[abs(r.MOI)<sig.10]<-NA
+r.sig.WEMOI<-r.WEMOI;r.sig.WEMOI[abs(r.WEMOI)<sig.10]<-NA
+
+# map of which PC has the largest absolute correlation at each pixel and month:
+var.max.PC1.PC2.PC3<-array(NA,c(12,n.pixels))
+var.max.PC1.PC2.PC3[abs(r.NAO)>abs(r.MOI) & abs(r.NAO)>abs(r.WEMOI)]<-1
+var.max.PC1.PC2.PC3[abs(r.MOI)>abs(r.NAO) & abs(r.MOI)>abs(r.WEMOI)]<-2
+var.max.PC1.PC2.PC3[abs(r.WEMOI)>abs(r.NAO) & abs(r.WEMOI)>abs(r.MOI)]<-3
+
+# for the comparison between NAO and MOI we must first link each PC to its original index:
+NAO.to.PC=c(2,3,3,3,2,2,1,1,3,3,2,2)
+MOI.to.PC=c(3,2,2,2,3,3,3,3,2,2,3,3)
+WEMOI.to.PC=c(1,1,1,1,1,1,2,2,1,1,1,1)
+
+corr.NAO<-corr.MOI<-corr.WEMOI<-array(NA,c(12,n.pixels))
+var.max.NAO.MOI<-var.max.NAO.MOI.WEMOI<-array(NA,c(12,n.pixels))
+for(mese in 1:12){
+ if(NAO.to.PC[mese]==1)corr.NAO[mese,]<-r.NAO[mese,]
+ if(NAO.to.PC[mese]==2)corr.NAO[mese,]<-r.MOI[mese,]
+ if(NAO.to.PC[mese]==3)corr.NAO[mese,]<-r.WEMOI[mese,]
+ if(MOI.to.PC[mese]==1)corr.MOI[mese,]<-r.NAO[mese,]
+ if(MOI.to.PC[mese]==2)corr.MOI[mese,]<-r.MOI[mese,]
+ if(MOI.to.PC[mese]==3)corr.MOI[mese,]<-r.WEMOI[mese,]
+ if(WEMOI.to.PC[mese]==1)corr.WEMOI[mese,]<-r.NAO[mese,]
+ if(WEMOI.to.PC[mese]==2)corr.WEMOI[mese,]<-r.MOI[mese,]
+ if(WEMOI.to.PC[mese]==3)corr.WEMOI[mese,]<-r.WEMOI[mese,]
+ var.max.NAO.MOI[mese,abs(corr.NAO[mese,])>=abs(corr.MOI[mese,])]<-1
+ var.max.NAO.MOI[mese,abs(corr.NAO[mese,])<abs(corr.MOI[mese,])]<-2
+ var.max.NAO.MOI.WEMOI[mese,abs(corr.NAO[mese,])>=abs(corr.MOI[mese,]) & abs(corr.NAO[mese,])>=abs(corr.WEMOI[mese,])]<-1
+ var.max.NAO.MOI.WEMOI[mese,abs(corr.MOI[mese,])>=abs(corr.NAO[mese,]) & abs(corr.MOI[mese,])>=abs(corr.WEMOI[mese,])]<-2
+ var.max.NAO.MOI.WEMOI[mese,abs(corr.WEMOI[mese,])>=abs(corr.NAO[mese,]) & abs(corr.WEMOI[mese,])>=abs(corr.MOI[mese,])]<-3
+}
+
+# fraction of land pixels whose explained variance exceeds each significance threshold (thresholds are squared because var1-var3 hold r^2 values):
+var1<-r2.NAO; var2<-r2.WEMOI; var3<-r2.MOI
+area.NAO.sig.10<-area.NAO.sig.5<-area.NAO.sig.1<-area.MOI.sig.10<-area.MOI.sig.5<-area.MOI.sig.1<-area.WEMOI.sig.10<-area.WEMOI.sig.5<-area.WEMOI.sig.1<-c()
+for(mese in 1:12){
+ area.NAO.sig.10[mese]<-length(which(abs(var1[mese,])>sig.10^2))/n.land.pixels
+ area.NAO.sig.5[mese]<-length(which(abs(var1[mese,])>sig.5^2))/n.land.pixels
+ area.NAO.sig.1[mese]<-length(which(abs(var1[mese,])>sig.1^2))/n.land.pixels
+ area.MOI.sig.10[mese]<-length(which(abs(var3[mese,])>sig.10^2))/n.land.pixels
+ area.MOI.sig.5[mese]<-length(which(abs(var3[mese,])>sig.5^2))/n.land.pixels
+ area.MOI.sig.1[mese]<-length(which(abs(var3[mese,])>sig.1^2))/n.land.pixels
+ area.WEMOI.sig.10[mese]<-length(which(abs(var2[mese,])>sig.10^2))/n.land.pixels
+ area.WEMOI.sig.5[mese]<-length(which(abs(var2[mese,])>sig.5^2))/n.land.pixels
+ area.WEMOI.sig.1[mese]<-length(which(abs(var2[mese,])>sig.1^2))/n.land.pixels
+}
+
+save.image("C:/nicola/pressure/teleconnessiones/NAO_MOI_WEMOI/R_corr_prec_NAO_MOI_WEMOI_v20_Pearson.RData")
+
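+# Sanity check for the partial-correlation helpers defined above: a partial correlation equals
+# the correlation between the residuals obtained after regressing out the controlled variable.
+# Sketch with synthetic data (left commented):
+#set.seed(1);x1<-rnorm(61);x2<-0.5*x1+rnorm(61);x3<-0.3*x1+rnorm(61)
+#cor.parcial.3.var(x1,x2,x3)
+#cor(resid(lm(x1~x3)),resid(lm(x2~x3))) # should match the value above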
+
+
+################ COMPUTATION + MAPS OF THE MEAN PRECIPITATION ASSOCIATED WITH EACH WT, USING THE ALREADY INTERPOLATED IP02 DAILY DATABASE ################################
+# (stand-alone script)
+y.start<-1950 # first year of data in the interpolated precipitation database
+y.end<-2003 # last year of data
+coord.system<-CRS("+proj=longlat +datum=WGS84")
+pixel.size<-0.20
+xmin.box<--9.6
+ymin.box<-36.0
+num.x.pixels<-71 # in theory it could be left unspecified, because it should be equal to n.lon
+num.y.pixels<-40 # in theory it could be left unspecified, because it should be equal to n.lat
+resol.unit<-"DecDegr"
+
+##################################################### CONSTANTS AND FUNCTIONS #########################################################################
+nome.mese<-c("January","February","March","April","May","June","July","August","September","October","November","December")
+nome.mese.short<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.type2<-c("NE","E","SE","S","SW","W","NW","N","C","CNE","CE","CSE","CS","CSW","CW","CNW","CN","A","ANE","AE","ASE","AS","ASW","AW","ANW","AN")
+WTs.color<-c(rep("black",8),rep("blue",9),rep("brown",9))
+WTs.color2<-c(rep("black",26));WTs.color2[c(6,15,24)]="blue"
+WTs.type.long<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)",
+                 "Northerly (N)","Cyclonic (C)","Cyc.Northeasterly (C.NE)","Cyc.Easterly (C.E)","Cyc.Southeasterly (C.SE)","Cyc.Southerly (C.S)",
+                 "Cyc.Southwesterly (C.SW)","Cyc.Westerly (C.W)","Cyc.Northwesterly (C.NW)","Cyc.Northerly (C.N)","Anticyclonic (A)",
+                 "Ant.Northeasterly (A.NE)","Ant.Easterly (A.E)","Ant.Southeasterly (A.SE)","Ant.Southerly (A.S)","Ant.Southwesterly (A.SW)",
+                 "Ant.Westerly (A.W)","Ant.Northwesterly (A.NW)","Ant.Northerly (A.N)")
+WTs.type.very.long<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)",
+                 "Northerly (N)","Cyclonic (C)","Cyclonic Northeasterly (C.NE)","Cyclonic Easterly (C.E)","Cyclonic Southeasterly (C.SE)","Cyclonic Southerly (C.S)",
+                 "Cyclonic Southwesterly (C.SW)","Cyclonic Westerly (C.W)","Cyclonic Northwesterly (C.NW)","Cyclonic Northerly (C.N)","Anticyclonic (A)",
+                 "Anticyclonic Northeasterly (A.NE)","Anticyclonic Easterly (A.E)","Anticyclonic Southeasterly (A.SE)","Anticyclonic Southerly (A.S)","Anticyclonic Southwesterly (A.SW)",
+                 "Anticyclonic Westerly (A.W)","Anticyclonic Northwesterly (A.NW)","Anticyclonic Northerly (A.N)")
+WTs.type.very.long2<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)",
+                 "Northerly (N)","Cyclonic (C)","Cyclonic Northeasterly (CNE)","Cyclonic Easterly (CE)","Cyclonic Southeasterly (CSE)","Cyclonic Southerly (CS)",
+                 "Cyclonic Southwesterly (CSW)","Cyclonic Westerly (CW)","Cyclonic Northwesterly (CNW)","Cyclonic Northerly (CN)","Anticyclonic (A)",
+                 "Anticyclonic Northeasterly (ANE)","Anticyclonic Easterly (AE)","Anticyclonic Southeasterly (ASE)","Anticyclonic Southerly (AS)","Anticyclonic Southwesterly (ASW)",
+                 "Anticyclonic Westerly (AW)","Anticyclonic Northwesterly (ANW)","Anticyclonic Northerly (AN)")
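+# The 26 Lamb types follow a fixed pattern (8 directional types, then cyclonic and its 8 hybrids,
+# then anticyclonic and its 8 hybrids), so the vector above can be checked programmatically:
+dirs<-c("NE","E","SE","S","SW","W","NW","N")
+stopifnot(identical(WTs.type,c(dirs,"C",paste("C",dirs,sep="."),"A",paste("A",dirs,sep="."))))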
"red", "#7F0000")) +jet.colors2 <-colorRampPalette(c("violet","blue","cyan","#7FFF7F", "yellow", "#FF7F00", "red")) +jet.colors3 <-colorRampPalette(c("violet","blue","cyan","#7FFF7F", "yellow", "#FF7F00")) +jet.colors4 <-colorRampPalette(c("violet","blue","cyan","#7FFF7F", "yellow")) +jet.colors5 <-colorRampPalette(c("violet","blue","cyan","#7FFF7F")) +jet.colors6 <-colorRampPalette(c("violet","blue","cyan")) +jet.colors7 <-colorRampPalette(c("red4","violet","blue","cyan")) +jet.colors8 <-colorRampPalette(c("violet","blue","cyan", "yellow", "#FF7F00")) # senza il verde +jet.colors9 <-colorRampPalette(c("violet","blue","cyan", "yellow", "orange")) +jet.colors10 <-colorRampPalette(c("red4","violet","blue","cyan","lightgreen")) +jet.colors11 <-colorRampPalette(c("red4","violet","blue","cyan", "yellow", "#FF7F00")) # come la 8 ma con in + il rosso scuro +jet.colors12 <-colorRampPalette(c("red4","violet","blue","cyan", "yellow")) +jet.colors13 <-colorRampPalette(c("violet","blue","cyan", "yellow", "darkgoldenrod3")) +jet.colors14 <-colorRampPalette(c("red4","violet","blue","cyan","darkgoldenrod2")) +jet.colors15 <-colorRampPalette(c("red4","violet","blue","cyan", "yellow", "darkgoldenrod3")) +IpPoly.geograf='C:/Projecto_Master/Limites Espana/borde_peniberica_sin_islas_sin_proyeccion.shp' +IpIslesPoly.geograf='C:/Projecto_Master/Limites Espana/borde_peniberica_sin_africa_y_francia_sin_proyeccion.shp' + +rescale.max<-function(my.array,val.max){ # funzione che prende un array e restituisce lo stesso array abbassando pero'tutti gli elementi piu'alti di val.max al valore val.max (utile per aggiustare le leggende dei grafici) + ss<-which(my.array>val.max) + my.array[ss]<-val.max + return(my.array) +} +rescale.min<-function(my.array,val.min){ # come rescale.max ma per i valori piu'piccoli di val.min che vengono cambiati a val.min + ss<-which(my.arrayval.max) + my.array[ss]<-val.max + ss<-which(my.array=y.start & WTs$Year<=y.end) +WTs<-WTs[-dim(WTs)[1],] # toglie il 31 dic dell'ultimo anno +WTs<-WTs[-dim(WTs)[1],] # toglie il 30 dic dell'ultimo anno +n.days<-dim(WTs)[1] # quindi alla fine i giorni sono 19721 +y.tot<-y.end-y.start+1 +nome.anni<-y.start:y.end +#year<-c();for(y in y.start:y.end)year<-c(year,rep(y,n.days.in.a.year(y))) #month<-c();for(y in y.start:y.end)month<-c(month,seq.months.in.a.year(y)) + +######################################################## ANALYSIS ############################################################################# + +# per ogni mese, calcola la somma dei grid di prec. per tutti gli anni del periodo senza dividere in base ai WTs: +prec.sum<-array(0,c(n.lat,n.lon,12),dimnames=list(rev(lat),lon,nome.mese)) # array con i grid somma della prec. mensile (lat/lon/mese) +righe<-list();for(mese in 1:12)righe[[mese]]<- which(WTs$Month==mese) # lista di 12 elenchi, uno per ogni mese, con le righe di WTs (ovvero i giorni) appartenenti a quel mese in tutto il periodo +for(mese in 1:12){for(d in righe[[mese]]){prec.sum[,,mese]<-prec.sum[,,mese]+prec[,,d]}} + +# per ogni mese, calcola la somma dei grid di prec.per tutto il periodo e li divide per il numero di anni in modo da creare la climatologia su tutto il periodo: +prec.clim<-array(0,c(n.lat,n.lon,12)) # array con i grid somma della prec. 
+######################################################## ANALYSIS #############################################################################
+
+# for each month, sum the precipitation grids over all the years of the period, without splitting by WT:
+prec.sum<-array(0,c(n.lat,n.lon,12),dimnames=list(rev(lat),lon,nome.mese)) # array with the summed monthly precipitation grids (lat/lon/month)
+righe<-list();for(mese in 1:12)righe[[mese]]<- which(WTs$Month==mese) # list of 12 vectors, one per month, with the rows of WTs (i.e. the days) belonging to that month over the whole period
+for(mese in 1:12){for(d in righe[[mese]]){prec.sum[,,mese]<-prec.sum[,,mese]+prec[,,d]}}
+
+# for each month, divide the summed grids by the number of years to obtain the climatology over the whole period:
+prec.clim<-array(0,c(n.lat,n.lon,12)) # summed monthly precipitation grids divided by the number of years (lat/lon/month)
+prec.clim<-prec.sum/y.tot
+
+# for each month and each WT, sum the precipitation grids over all the years of the period:
+prec.wt.sum<-array(0,c(n.lat,n.lon,nWT,12),dimnames=list(rev(lat),lon,WTs.type,nome.mese))
+righe.wt<-list();for(mese in 1:12){for(wt in 1:nWT){righe.wt[[mese*100+wt]]<-which(WTs$Month==mese & WTs$WT==wt)}}
+for(mese in 1:12){for(wt in 1:nWT){for(d in righe.wt[[mese*100+wt]]){prec.wt.sum[,,wt,mese]<-prec.wt.sum[,,wt,mese]+prec[,,d]}}}
+
+# for each month and each WT, normalize the summed grids by the number of years to obtain the climatology of each WT:
+prec.wt.clim<-array(0,c(n.lat,n.lon,nWT,12),dimnames=list(rev(lat),lon,WTs.type,nome.mese)) # summed monthly precipitation of each WT, divided by the number of years
+for(mese in 1:12){for(wt in 1:nWT)prec.wt.clim[,,wt,mese]<-round(prec.wt.sum[,,wt,mese]/y.tot,1)}
+
+# for each month and each WT, normalize the summed grids by the total precipitation over the whole period
+# (i.e. the relative contribution, or Percentage Contribution; percentage rounded to the second decimal place):
+prec.wt.sum.norm<-array(0,c(n.lat,n.lon,nWT,12),dimnames=list(rev(lat),lon,WTs.type,nome.mese)) # normalized summed monthly precipitation of each WT
+for(mese in 1:12){for(wt in 1:nWT){prec.wt.sum.norm[,,wt,mese]<-100*round(prec.wt.sum[,,wt,mese]/prec.sum[,,mese],4)}}
+
+# count the number of days in which each WT occurs over the whole period, without dividing by the number of years (it does not depend on the pixel):
+wt.n.days<-array(NA,c(nWT,12),dimnames=list(WTs.type,nome.mese.short))
+for(mese in 1:12){for(wt in 1:nWT){wt.n.days[wt,mese]<-length(righe.wt[[mese*100+wt]])}}
+
+# frequency of each WT, as the mean number of days per month:
+wt.n.days.mean<-wt.n.days/y.tot
+
+# Precipitation Intensity (in mm/day) for each WT, month and pixel (including, however, the days without rain):
+prec.wt.int<-array(NA,c(n.lat,n.lon,nWT,12),dimnames=list(rev(lat),lon,WTs.type,nome.mese))
+for(mese in 1:12){for(wt in 1:nWT)prec.wt.int[,,wt,mese]<-prec.wt.sum[,,wt,mese]/wt.n.days[wt,mese]} # beware of the NaN values created when dividing by 0
+
+# Precipitation Efficiency for each WT, month and pixel (Prec.Efficiency = number of rainy days >= 1 mm of the WT over the whole period / number of days of the WT over the whole period):
+prec.wt.eff<-array(NA,c(n.lat,n.lon,nWT,12),dimnames=list(rev(lat),lon,WTs.type,nome.mese))
+wt.n.rain.days<-array(0,c(n.lat,n.lon,nWT,12),dimnames=list(rev(lat),lon,WTs.type,nome.mese)) # counts the number of days of the WT with rain over the whole period
+temp<-prec;temp[which(prec>=1)]<-1;temp[which(prec<1)]<-0 # temp is like prec but made only of 0s and 1s, where 1 marks rain >= 1 mm (and NA over the sea)
+for(mese in 1:12){for(wt in 1:nWT){for(d in righe.wt[[mese*100+wt]]){wt.n.rain.days[,,wt,mese]<-wt.n.rain.days[,,wt,mese]+temp[,,d]}}}
+for(mese in 1:12){for(wt in 1:nWT)prec.wt.eff[,,wt,mese]<-100*round(wt.n.rain.days[,,wt,mese]/wt.n.days[wt,mese],3)}
+
+# (table) area, i.e. fraction of pixels with prec.wt.clim > threshold (mm):
+threshold=1
+area.wt.clim.threshold<-array(NA,c(nWT,12),dimnames=list(WTs.type,nome.mese))
+for(mese in 1:12)for(wt in 1:nWT)area.wt.clim.threshold[wt,mese]<-length(which(prec.wt.clim[,,wt,mese]>threshold))/n.land.pixels
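+# Consistency note: by construction, mean precipitation = frequency x intensity, i.e.
+# prec.wt.sum/y.tot = (wt.n.days/y.tot) * (prec.wt.sum/wt.n.days). A spot check, up to the
+# rounding applied above (left commented):
+#wt<-9;mese<-1 # e.g. the pure cyclonic type in January
+#range(prec.wt.clim[,,wt,mese]-prec.wt.int[,,wt,mese]*wt.n.days.mean[wt,mese],na.rm=TRUE) # should stay within +-0.05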
+tab.area<-100*round(area.wt.clim.threshold,3) # rounds and converts the values to percentages
+tab.area<-cbind(tab.area,round(rowMeans(tab.area),1))
+
+# computation of the percentile thresholds (discarding the days with less than 1 mm of rain):
+percentile=c(0.05,0.10,0.20,0.25,0.50,0.75,0.80,0.90,0.95) # values chosen as thresholds
+reverse=c(TRUE,TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE) # TRUE for indices like R10p that must count the days BELOW that percentile instead of above it (days below are counted up to and including the 0.50 percentile)
+percentiles<-array(NA,c(n.lat,n.lon,12,length(percentile)),dimnames=list(rev(lat),lon,nome.mese,percentile))
+for(mese in 1:12)for(i in 1:n.lat)for(j in 1:n.lon)if(!is.na(prec[i,j,1])){
+ temp<-prec[i,j,righe[[mese]]]
+ ss<-which(prec[i,j,righe[[mese]]]<1)
+ percentiles[i,j,mese,]<-quantile(temp[-ss],percentile,type=4)
+}
+righe.by.year<-list() # positions of the elements of prec[,,d] corresponding to the days belonging to the chosen month and year
+for(mese in 1:12){
+ righe.by.year[[mese]]<-list()
+ for(year in 1:y.tot)righe.by.year[[mese]][[year]]<-which(WTs$Month==mese & WTs$Year==y.start+year-1)
+}
+n.days.above<-array(NA,c(n.lat,n.lon,12,y.tot,length(percentile)),dimnames=list(rev(lat),lon,nome.mese,nome.anni,percentile))
+barraP<-winProgressBar("Analyzing Percentiles","Please wait...",0,length(percentile),0)
+for(n.p in 1:length(percentile)){
+ if(reverse[n.p]==FALSE){
+ for(mese in 1:12)for(i in 1:n.lat)for(j in 1:n.lon)for(year in 1:y.tot)if(!is.na(prec[i,j,1])){
+ temp<-prec[i,j,righe.by.year[[mese]][[year]]]
+ n.days.above[i,j,mese,year,n.p]<-length(which(temp>percentiles[i,j,mese,n.p]))
+ }
+ } else {
+ for(mese in 1:12)for(i in 1:n.lat)for(j in 1:n.lon)for(year in 1:y.tot)if(!is.na(prec[i,j,1])){
+ temp<-prec[i,j,righe.by.year[[mese]][[year]]]
+ n.days.above[i,j,mese,year,n.p]<-length(which(temp>=1 & temp<percentiles[i,j,mese,n.p]))
+ }
+ }
+ setWinProgressBar(barraP,n.p)
+}
+close(barraP)
+
+# mean number of days per year above (or below) each percentile:
+n.days.above.medio<-apply(n.days.above,c(1,2,3,5),mean)
+
+# map of how many WTs have a percentage contribution >= X%:
+soglia=1
+num.wt<-array(0,c(n.lat,n.lon,12),dimnames=list(rev(lat),lon,nome.mese))
+temp<-prec.wt.sum.norm
+ss<-which(prec.wt.sum.norm>=soglia)
+tt<-which(prec.wt.sum.norm<soglia)
+temp[ss]<-1
+temp[tt]<-0
+for(mese in 1:12)num.wt[,,mese]<-apply(temp[,,,mese],c(1,2),sum) # number of WTs contributing at least soglia % at each pixel
+num.wt.mappa<-grid.vuoto;for(mese in 1:12)num.wt.mappa@data[[nome.mese.short[mese]]]<-as.vector(t(num.wt[,,mese]))
+my.names<-nome.mese
+my.title<-paste("Number of WTs with Percentage Contribution >=",soglia,"%",sep="")
+my.prec.levels<- seq(1,20,1)
+my.prec.levels.in.leyend<-my.prec.levels #seq(1,20,1)
+my.map<-spplot(num.wt.mappa,nome.mese.short,main=my.title,par.settings = standard.theme(color = FALSE),as.table=TRUE,layout=c(3,4),
+ at=my.prec.levels,col.regions=rev(jet.colors(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names,legendEntries=my.labels,colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend)))
+print(my.map)
+jpeg("C:/nicola/precipitaciones/weather_types/mapas/6) composiciones_EMULATE_y_IP02/Climatology_Ip02_1950-2003.jpg",width=590,height=616,quality=100);print(my.map);dev.off() # other possible formats to save the plot are: pdf, bmp, jpeg, tiff.
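+# Toy example of the wet-day percentile logic used above (illustration only, left commented):
+#x<-c(0,0.5,2,3,10,20,1.2) # one pixel, one month of daily precipitation
+#p90<-quantile(x[x>=1],0.90,type=4) # percentile computed on wet days (>= 1 mm) only
+#length(which(x>p90)) # R90p-style count: number of days exceeding the wet-day 90th percentile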
+ +# mappa di quanti WTs servono per arrivare all'X% di prec.mensile +soglia=70 +num.wt<-array(NA,c(n.lat,n.lon,12),dimnames=list(rev(lat),lon,nome.mese)) +for(mese in 1:12){for(i in 1:n.lat){for (j in 1:n.lon){ + if(!is.na(prec.wt.sum.norm[i,j,1,mese])){ + temp<-prec.wt.sum.norm[i,j,,mese] # vettore con i contributi percentuali di ogni tipo di tempo per quel pixel e mese + temp<-sort(temp,decreasing=TRUE) + temp<-cumsum(temp) + num.wt[i,j,mese]<-which(temp>soglia)[1] + } else {num.wt[i,j,mese]=NA} +}}} +num.wt.mappa<-grid.vuoto;for(mese in 1:12)num.wt.mappa@data[[nome.mese.short[mese]]]<-as.vector(t(num.wt[,,mese])) +my.names<-nome.mese +my.title<-paste("Number of WTs with Sum of Percentual Contribution = ",soglia,"%",sep="") +my.prec.levels<- seq(1,max(num.wt,na.rm=T),1) +my.prec.levels.in.leyend<-my.prec.levels #seq(1,20,1) +my.map<-spplot(num.wt.mappa,nome.mese.short[c(12,1:11)],main=my.title,par.settings = standard.theme(color = FALSE),as.table=TRUE,layout=c(3,4), + at=my.prec.levels,col.regions=rev(jet.colors(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names[c(12,1:11)],legendEntries=my.labels,colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend))) +print(my.map) +jpeg("C:/nicola/precipitaciones/weather_types/mapas/6) composiciones_EMULATE_y_IP02/Climatology_Ip02_1950-2003.jpg",width=590,height=616,quality=100);print(my.map);dev.off() # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. + + +# mappa della soglia (in mm) del percentile XX% fissato: +p=0.90 # scegli un percentile gia' presente nel vettore percentile +n.percentile<-which(percentile==p) +upper.limit=max(percentiles[,,,n.percentile],na.rm=T) #2.5 per il percentile 0.10 +percentiles.mappa<-grid.vuoto;for(mese in 1:12)percentiles.mappa@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(percentiles[,,mese,n.percentile],upper.limit))) +my.names<-nome.mese +my.title<-paste(100*percentile[n.percentile],"th Percentile",sep="") +my.prec.levels<-seq(1,upper.limit,0.1) # seq(1,max(percentiles,na.rm=T),0.1) # nota che la soglia e'sempre >1 mm per definizione di percentile di pioggia +my.prec.levels.in.leyend<-my.prec.levels #seq(1,max(percentiles,na.rm=T),0.1) +my.map<-spplot(percentiles.mappa,nome.mese.short[c(12,1:11)],main=my.title,par.settings = standard.theme(color = FALSE),as.table=TRUE,layout=c(3,4), + at=my.prec.levels,col.regions=rev(jet.colors(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names[c(12,1:11)],legendEntries=my.labels,colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend))) +print(my.map) + +# mappa indice RXXp (numero di giorni al mese superiori al percentile XX): +p=0.05 # scegli il percentile corrispondente all'indice RXXp da mappare + +n.percentile<-which(percentile==p) +upper.limit=max(n.days.above.medio[,,,n.percentile],na.rm=T) #2.5 per il percentile 0.10 +my.var.map<-grid.vuoto;for(mese in 1:12)my.var.map@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(n.days.above.medio[,,mese,n.percentile],upper.limit))) +my.names<-nome.mese +my.title<-paste("R",100*p,"p",sep="") +#my.prec.levels<-seq(0,max(n.days.above.medio,na.rm=T),0.1) # seq(1,max(percentiles,na.rm=T),0.1) +my.prec.levels<-seq(0,0.1+max(n.days.above.medio[,,,n.percentile],na.rm=T),0.1) +my.prec.levels.in.leyend<-my.prec.levels #seq(1,max(percentiles,na.rm=T),0.1) +my.map<-spplot(my.var.map,nome.mese.short[c(12,1:11)],main=my.title,par.settings = standard.theme(color = FALSE),as.table=TRUE,layout=c(3,4), + 
at=my.prec.levels,col.regions=rev(jet.colors(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names[c(12,1:11)],legendEntries=my.labels,colorkey=list(space="right",width=0.5,height=1,labels=list(at=my.prec.levels.in.leyend))) +#print(my.map) +jpeg(paste("C:/nicola/precipitaciones/weather_types/Ip02/mapa ",my.title,".jpg",sep=""),width=600,height=650,quality=100);print(my.map);dev.off() + + +# visualizza una composizione per ogni WT con il suo contributo medio mensile alla prec, cioe' visualizza la "climatologia dei WT" +my.names<-nome.mese +my.cuts<- c(seq(1,30,1),35,40,50,70,100)^0.5 # fa la radice quadrata per diminuiere le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! +my.labels=c(1,10,20,50,100) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici +my.cuts.in.leyend<-my.labels^0.5 +lx=0.25;ly=0.25 # lunghezza orizzontale e verticale di un mapa piccolo +g1=0.74;g3=0.53;g8=0.32;g5=0.11 # altezza prima/seconda/terza/quarta fila + +prec.wt.clim.mappa<-list() +for(wt in 1:nWT){ # cicla su tutti i WTs + prec.wt.clim.mappa[[wt]]<-grid.vuoto + for(mese in 1:12)prec.wt.clim.mappa[[wt]]@data[[nome.mese.short[mese]]]<-as.vector(t(prec.wt.clim[,,wt,mese]))^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + + my.map<-list() + for(mese in 1:12){ + my.main<-list(label=nome.mese[mese],cex=1) + my.map[[mese]]<-spplot(prec.wt.clim.mappa[[wt]],nome.mese.short[mese],main=my.main,par.settings = standard.theme(color = FALSE), + at=my.cuts,col.regions=rev(jet.colors3(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names[mese],legendEntries=my.labels,colorkey=FALSE) + } + + # sotto la mappa di agosto aggiunge la legenda: + mese=8;my.main<-list(label=nome.mese[mese],cex=1) + my.map[[mese]]<-spplot(prec.wt.clim.mappa[[wt]],nome.mese.short[mese],main=my.main,par.settings = standard.theme(color = FALSE), + at=my.cuts,col.regions=rev(jet.colors3(10000)),sp.layout=my.layout,cex=.5,names.attr=my.names[mese],colorkey=list(space="bottom",width=0.5,height=3,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + + #jpeg(paste("C:/nicola/precipitaciones/weather_types/mapas/14) composiciones_IP02_WTs_climatology_leyenda_bonita/",WTs.type[wt],".jpg",sep=""),width=590,height=616,quality=100) # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. + tiff(paste("C:/nicola/precipitaciones/weather_types/mapas/14) composiciones_IP02_WTs_climatology_leyenda_bonita/",WTs.type[wt],".tif",sep=""),width=590,height=616,quality=100) # altri formati possibili per salvare il grafico sono: pdf, bmp,jpeg, tiff. 
+ textplot(paste(WTs.type[wt],"Climatology > 1 mm") ,cex=1.5,halign="center",valign="center",mar=c(0,0,0,0)) + print(my.map[[1]], position= c(0,g1,0+lx,g1+ly),more=T) # fai ?print.trellis per la spiegazione di come dividere lo schermo con gli spplot + print(my.map[[2]], position= c(0.25,g1,0.25+lx,g1+ly),more=T) # (xmin, ymin, xmax, ymax) + print(my.map[[3]], position= c(0.50,g1,0.50+lx,g1+ly),more=T) + print(my.map[[4]], position= c(0.75,g1,0.75+lx,g1+ly),more=T) + print(my.map[[5]], position= c(0.75,g3,0.75+lx,g3+ly),more=T) + print(my.map[[6]], position= c(0.75,g8,0.75+lx,g8+ly),more=T) + print(my.map[[7]], position= c(0.75,g5,0.75+lx,g5+ly),more=T) + print(my.map[[8]], position= c(0.50,g5-0.038,0.50+lx,g5+ly),more=T) + print(my.map[[9]], position= c(0.25,g5,0.25+lx,g5+ly),more=T) + print(my.map[[10]], position= c(0,g5,0+lx,g5+ly),more=T) + print(my.map[[11]], position= c(0,g8,0+lx,g8+ly),more=T) + print(my.map[[12]], position= c(0,g3,0+lx,g3+ly),more=T) + dev.off() +} # chiude il for su wt + +# visualizza per ogni WT la sua climatologia mensile (senza composizione): +upper.limit=40 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero +my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuiere le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! +my.labels=c("1 mm","10 mm","20 mm","30 mm",paste(upper.limit,"mm")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici +my.cuts.in.leyend<-c(1,10,20,30,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels +my.color=rev(jet.colors3(1000)) +my.names<-nome.mese +prec.wt.clim.mappa<-list() +for(wt in 1:nWT){ # cicla su tutti i WTs + my.main<-paste(WTs.type.long[wt],"Climatology") # inserisci qui il titolo del grafico + my.var<-prec.wt.clim # inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,wt,mese], se e' diverso modica la riga sopra l'spplot) + my.grid<-grid.vuoto + for(mese in 1:12)my.grid@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + my.map<-spplot(my.grid,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map<-spplot(my.grid.high,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + 
at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + jpeg(paste("C:/nicola/precipitaciones/weather_types/mapas/16) Ip02_WTs_climatology_over_1_mm_leyenda_perfecta/",WTs.type[wt],".jpg",sep=""),width=590,height=616,quality=100);print(my.map);dev.off() +} +prec.wt.clim.medio<-array(NA,c(12,nWT),dimnames=list(nome.mese,WTs.type)) # (tabla) per ogni WT e mese calcola la somma su tutti i pixel del contributo assoluto (somma in mm) alla prec.tot: +for(mese in 1:12){for(wt in 1:nWT)prec.wt.clim.medio[mese,wt]<-mean(prec.wt.clim[,,wt,mese],na.rm=T)} +tab.contrib.ass<-t(round(prec.wt.clim.medio,1)) # arrotonda e trasforma i valori in percentuali +tab.contrib.ass<-rbind(tab.contrib.ass,round(colSums(t(prec.wt.clim.medio)),1)) +tab.contrib.ass<-cbind(tab.contrib.ass,round(rowMeans(tab.contrib.ass),1)) + +# visualizza per ogni WT il suo contributo relativo mensile (senza composizione): +upper.limit=40 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero +my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuiere le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! +my.labels=c("1%","10%","20%","30%",paste(upper.limit,"%",sep="")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici +my.cuts.in.leyend<-c(1,10,20,30,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels +my.color=rev(jet.colors7(1000)) +my.names<-nome.mese +for(wt in 1:nWT){ # cicla su tutti i WTs + my.main<-paste(WTs.type.long[wt],"Relative contribute") # inserisci qui il titolo del grafico + my.var=prec.wt.sum.norm # inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,wt,mese], se e' diverso modica la riga sopra l'spplot) + my.grid<-grid.vuoto + for(mese in 1:12)my.grid@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + my.map<-spplot(my.grid,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map<-spplot(my.grid.high,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + 
at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + jpeg(paste("C:/nicola/precipitaciones/weather_types/mapas/17) Ip02_WTs_relative_contrib_over_1_percent/",WTs.type[wt],".jpg",sep=""),width=590,height=616,quality=100);print(my.map);dev.off() +} +prec.wt.sum.norm.medio<-array(NA,c(nWT,12),dimnames=list(WTs.type,nome.mese)) # (tabla) per ogni WT e mese calcola la somma su tutti i pixel del contributo relativo alla prec.tot: +for(mese in 1:12){for(wt in 1:nWT)prec.wt.sum.norm.medio[wt,mese]<-mean(prec.wt.sum.norm[,,wt,mese],na.rm=T)} +tab.contrib.rel<-round(prec.wt.sum.norm.medio,1) # arrotonda e trasforma i valori in percentuali +tab.contrib.rel<-rbind(tab.contrib.rel,colSums(tab.contrib.rel)) + + +# visualizza per ogni WT la sua intensita'mensile (senza composizione): +upper.limit=20 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero +my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuire le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! +my.labels=c("1 mm/day","5 mm/day","10 mm/day","15 mm/day",paste(upper.limit," mm/day",sep="")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici +my.cuts.in.leyend<-c(1,5,10,15,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels +my.color=rev(jet.colors3(1000)) +my.names<-nome.mese +for(wt in 1:nWT){ # cicla su tutti i WTs + my.main<-paste(WTs.type.long[wt],"Prec.Intensity") # inserisci qui il titolo del grafico + my.var=prec.wt.int # inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,mese], se e' diverso modica la riga sopra l'spplot) + my.grid<-grid.vuoto + for(mese in 1:12)my.grid@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + my.map<-spplot(my.grid,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map<-spplot(my.grid.high,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + 
jpeg(paste("C:/nicola/precipitaciones/weather_types/mapas/18) Ip02_WTs_prec_intensity/",WTs.type[wt],".jpg",sep=""),width=590,height=616,quality=100);print(my.map);dev.off() +} +prec.wt.int.medio<-array(NA,c(nWT,12),dimnames=list(WTs.type.long,nome.mese)) +for(mese in 1:12){for(wt in 1:nWT)prec.wt.int.medio[wt,mese]<-mean(prec.wt.int[,,wt,mese],na.rm=T)} +prec.wt.int.medio[which(is.nan(prec.wt.int.medio))]<-0 # mette a zero i mesi senza giorni di pioggia per quel WT in tutto il periodo +prec.wt.int.medio.tab<-round(rbind(prec.wt.int.medio,colSums(prec.wt.int.medio)),1) +prec.wt.int.medio.tab<-cbind(prec.wt.int.medio.tab,round(rowMeans(prec.wt.int.medio.tab),1)) # (tabla) per ogni WT e mese calcola la somma su tutti i pixel della intensita'di prec: + + +# visualizza per ogni WT la sua efficienza mensile (senza composizione): +upper.limit=100 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero +my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuire le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! +my.labels=c("1%","25%","50%","75%",paste(upper.limit,"%",sep="")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici +my.cuts.in.leyend<-c(1,25,50,75,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels +my.color=rev(jet.colors8(1000)) +my.names<-nome.mese +for(wt in 1:nWT){ # cicla su tutti i WTs + my.main<-paste(WTs.type.long[wt],"Prec.Efficiency") # inserisci qui il titolo del grafico + my.var=prec.wt.eff# inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,mese], se e' diverso modica la riga sopra l'spplot) + my.grid<-grid.vuoto + for(mese in 1:12)my.grid@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + my.map<-spplot(my.grid,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map<-spplot(my.grid.high,nome.mese.short,main=my.main,par.settings = standard.theme(color = FALSE),layout=c(3,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=1,labels=list(at=my.cuts.in.leyend,labels=my.labels))) + jpeg(paste("C:/nicola/precipitaciones/weather_types/Ip02/19) 
Ip02_WTs_prec_efficiency/",WTs.type[wt],".jpg",sep=""),width=590,height=616,quality=100);print(my.map);dev.off() +} +prec.wt.eff.medio<-array(NA,c(nWT,12),dimnames=list(WTs.type.long,nome.mese)) +for(mese in 1:12){for(wt in 1:nWT)prec.wt.eff.medio[wt,mese]<-mean(prec.wt.eff[,,wt,mese],na.rm=T)} +prec.wt.eff.medio[which(is.nan(prec.wt.eff.medio))]<-0 # mette a zero i mesi senza giorni di pioggia per quel WT in tutto il periodo +prec.wt.eff.medio.tab<-round(cbind(prec.wt.eff.medio,rowMeans(prec.wt.eff.medio)),1) # (tabla) per ogni WT e mese calcola la somma su tutti i pixel della efficienza dei prec: + + +# grafico di tutti gli indici solo per i mesi di gennaio, aprile, luglio e ottobre (1,4,7, e 10): +my.period<-c(1,4,7,10) # seleziona i mesi da mappare + +my.text<-list("sp.text", c(3,36.8), "400 km",cex=.4,which=4) # in futuro riadattalo anche per le mappe in cord.geografiche +my.scale.bar<-list("SpatialPolygonsRescale", layout.scale.bar(0.03), offset = c(-0.3,36.2),scale = 4, which=4,fill=c("transparent","black")) +my.north.arrow<-list("SpatialPolygonsRescale", offset = c(4,36.2), scale = 1,layout.north.arrow(type=2),size=1,which=4) # per disegnare l'altra freccia del nord meno orribile, quella senza la N dentro la freccia +my.north<-list("sp.text", c(4.15,37.6), "N",cex=.5,which=4) # which serve per aggiungere il testo solo nel pannello numero 6 !!! +my.layout<-list(my.polygon) +my.layout2<-list(my.polygon,my.text,my.scale.bar,my.north.arrow,my.north) +for(wt in 1:nWT){ # cicla su tutti i WTs + # visualizza per ogni WT la sua climatologia mensile (senza composizione): + upper.limit=40 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero + my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuiere le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! 
+ my.labels=c("1 mm","10 mm","20 mm","30 mm",paste(upper.limit,"mm")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici + my.cuts.in.leyend<-c(1,10,20,30,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels + my.color=rev(jet.colors15(1000)) + my.names<-nome.mese[my.period] + prec.wt.clim.mappa<-list() + my.main<-list(label="Mean Precipitation",cex=1,vjust=1) # vjust sposta il titolo un piu'in basso, vedi ?xy.plot sotto la descrizione di main + my.var<-prec.wt.clim # inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,wt,mese], se e' diverso modica la riga sopra l'spplot) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map1<-spplot(my.grid.high,nome.mese.short[my.period],main=my.main,par.settings = standard.theme(color = FALSE),layout=c(1,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,names.attr=my.names,par.strip.text = list(cex = .9),colorkey=list(space="bottom",width=0.5,height=.8,labels=list(at=my.cuts.in.leyend,labels=my.labels,cex=.6))) + + # visualizza per ogni WT il suo contributo relativo mensile (senza composizione): + upper.limit=40 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero + my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuiere le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! 
+ my.labels=c("1%","10%","20%","30%",paste(upper.limit,"%",sep="")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici + my.cuts.in.leyend<-c(1,10,20,30,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels + my.color=rev(jet.colors7(1000)) + my.names<-nome.mese[my.period] + my.main<-list(label="% Contribution",cex=1,vjust=1) # vjust sposta il titolo un piu'in basso, vedi ?xy.plot sotto la descrizione di main + my.var=prec.wt.sum.norm # inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,wt,mese], se e' diverso modica la riga sopra l'spplot) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map2<-spplot(my.grid.high,nome.mese.short[my.period],main=my.main,par.settings = standard.theme(color = FALSE),layout=c(1,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,names.attr=my.names,par.strip.text = list(cex = .9),colorkey=list(space="bottom",width=0.5,height=.8,labels=list(at=my.cuts.in.leyend,labels=my.labels,cex=.6))) + + # visualizza per ogni WT la sua intensita'mensile (senza composizione): + upper.limit=20 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero + my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuire le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! 
+ my.labels=c("1 mm/d","","10 mm/d","",paste(upper.limit," mm/d",sep="")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici + my.cuts.in.leyend<-c(1,5,10,15,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels + my.color=rev(jet.colors15(1000)) + my.names<-nome.mese[my.period] + my.main<-list(label="Intensity",cex=1,vjust=1) # vjust sposta il titolo un piu'in basso, vedi ?xy.plot sotto la descrizione di main + my.var=prec.wt.int # inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,wt,mese], se e' diverso modica la riga sopra l'spplot) + my.grid<-grid.vuoto + for(mese in 1:12)my.grid@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + my.map<-spplot(my.grid,nome.mese.short[my.period],main=my.main,par.settings = standard.theme(color = FALSE),layout=c(1,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,colorkey=list(space="bottom",width=0.5,height=.8,labels=list(at=my.cuts.in.leyend,labels=my.labels,cex=.6))) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map3<-spplot(my.grid.high,nome.mese.short[my.period],main=my.main,par.settings = standard.theme(color = FALSE),layout=c(1,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout,cex=.5,names.attr=my.names,par.strip.text = list(cex = .9),colorkey=list(space="bottom",width=0.5,height=.8,labels=list(at=my.cuts.in.leyend,labels=my.labels,cex=.6))) + + # visualizza per ogni WT la sua efficienza mensile (senza composizione): + upper.limit=100 # limite massimo sopra il quale tutti i dati vengono ridotti a questo numero + my.cuts<- seq(1,upper.limit,1) #^0.5 # fa la radice quadrata per diminuire le differenze tra i valori piu' alti e quelli piu' bassi in modo che non ci sia un colore che domina la legenda; occhio che funziona solo se la variabile da rappresentare non e' negativa!!! 
+ my.labels=c("1%","25%","50%","75%",paste(upper.limit,"%",sep="")) # i numeri che vuoi vedere visualizzati nella legenda, anche se nella mappa si visualizzano in realta' le loro radici + my.cuts.in.leyend<-c(1,25,50,75,upper.limit) #^0.5 # posizione tacche della legenda dove si visualizzano i my.labels + my.color=rev(jet.colors7(1000)) + my.names<-nome.mese[my.period] + my.main<-list(label="Efficiency",cex=1,vjust=1) # vjust sposta il titolo un piu'in basso, vedi ?xy.plot sotto la descrizione di main + my.var=prec.wt.eff# inserisci qui la variabile da mappare (dev'essere un array del tipo [lat,lon,mese], se e' diverso modica la riga sopra l'spplot) + # disegna la stessa mappa ma senza sbavature: + my.grid.high<-grid.high.vuoto;my.var.high<-array(NA,c(dim(my.var)[1]*multiply,dim(my.var)[2]*multiply,dim(my.var)[3],dim(my.var)[4]));for(i in 1:dim(my.var.high)[1]){for(j in 1:dim(my.var.high)[2])my.var.high[i,j,wt,]<-my.var[ceiling(i/multiply),ceiling(j/multiply),wt,]} + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]]<-as.vector(t(rescale.max(my.var.high[,,wt,mese],upper.limit))) #^0.5 # carica i dati nel grid (la radice serve per disegnare la colorkey con colori meglio distribuiti) + for(mese in 1:12)my.grid.high@data[[nome.mese.short[mese]]][sea.high.pixels]<-NA # elimina i pixel del grid fuori dalla penisola iberica e dalle isle + my.map4<-spplot(my.grid.high,nome.mese.short[my.period],main=my.main,par.settings = standard.theme(color = FALSE),layout=c(1,4),as.table=T, + at=my.cuts,col.regions=my.color,sp.layout=my.layout2,cex=.5,names.attr=my.names,par.strip.text = list(cex = .9),colorkey=list(space="bottom",width=0.5,height=.8,labels=list(at=my.cuts.in.leyend,labels=my.labels,cex=.6))) + # scales=list(draw = TRUE,cex=.5), # per aggiungere le coordinate geografiche sui bordi + # strip=FALSE per non disegnare i titoli dei grafici!!!!! strip.left=TRUE per aggiungerla anche a sinistra!!!!! + + #jpeg(paste("C:/nicola/precipitaciones/weather_types/Ip02/21) Ip02_WTs_all_indices/",WTs.type[wt],".jpg",sep=""),quality=100) + png(paste("C:/nicola/precipitaciones/weather_types/Ip02/22) Ip02_WTs_all_indices_as_tiff_high_res/",WTs.type[wt],".tif",sep=""),width=460,height=460) + textplot(".",cex=1.4,col="white",srt=90,mar=c(0,0,20,23)) # quando si ruota purtroppo si tronca l'ultima parte del testo + #print(textplot(WTs.type.very.long[wt],cex=1.4,halign="center",valign="top",mar=c(30,0,0,0)),more=T) # il 30 lo sposta verso l'alto per non sovrapporlo ai nomi degli indici + print(my.map1, position= c(-0.01,0,0.28,1),more=T) # (xmin, ymin, xmax, ymax) + print(my.map2, position= c(0.23,0,0.52,1),more=T) # (xmin, ymin, xmax, ymax) + title(WTs.type.very.long2[wt],cex=1.4,srt=0,outer=TRUE,line=-1) + print(my.map3, position= c(0.471,0,0.76,1),more=T) # (xmin, ymin, xmax, ymax) # pon 0.47 si no quieres crear lineas verticales en la cartografia automatica; pon 0.471 si quieres copiar las imagenes en Paint.NET para subir la resolucion a 660 dpi. 
+ print(my.map4, position= c(0.711,0,1.00,1),more=T) # (xmin, ymin, xmax, ymax) + dev.off() +} + +# grafico riassuntivo con label intelligenti: +# library(plotrix) +# library(squash) +# par(mfrow=c(4,3));par(oma=c(0,0,0,0)); par(mar=c(2.3,2.5,1,0)) # oma controlla il bordo esterno a tutti i grafici, mar i bordi interni tra grafici +# for(mese in 1:12){ + # x<-1+prec.wt.int.medio[,mese] + # y<-1+prec.wt.clim.medio[mese,] + # z<-prec.wt.eff.medio[,mese] + # my.max<-ceiling(max(prec.wt.int.medio[,mese],prec.wt.clim.medio[mese,])) + # my.palette<-rev(jet.colors12(100));my.colors<-my.palette[z] # divide la paletta in 100 colori ed estrae a quali colori corrispondono i valori di efficienza, che essendo numeri da 1 a 100 si possono confrontare con i 100 colori della paletta + # plot(x,y,log="xy",type="n",axes=F,xlim=1+c(0,my.max),ylim=1+c(0,my.max),xlab="",ylab="",mgp=c(2,0,0)) # disegna il grafico vuoto + # title(xlab="Prec.Intensity (mm/day)",mgp=c(1.3,0,0)) # la prima opzione + # title(ylab="Prec.Sum (mm)",,mgp=c(1.3,0,0)) + # axis(1, at=1+0:my.max,labels=0:my.max,cex.axis=.8,mgp=c(0,0.5,0)) + # axis(2, at=1+0:my.max,labels=0:my.max,cex.axis=.8,mgp=c(0,0.5,0)) + # segments(1,1,1+my.max,1+my.max,col="gray") # bisettrice + # segments(1+0:my.max,1,1+0:my.max,1+my.max,col="gray") # linee verticali maglia + # segments(1,1+0:my.max,1+my.max,1+0:my.max,col="gray") # linee orizzontali maglia + # points(x,y,cex=1.2,pch=21,bg=my.colors) # cerchietti dei WTs + # #text((z.labels)^0.5-0.1,(z.labels)^0.5+0.1,labels=z.labels,srt=53) + # text(my.max*6/10,my.max*6/10,labels="Frequency = 3.3% or 1 dd/mth",srt=42,cex=.9) + # thigmophobe.labels(x,y,WTs.type,cex=.7,col=WTs.color) + # title(nome.mese.short[mese],line=-0.1) +# } +# map<-list(breaks=0:100,colors=my.palette) +# hkey(map,title="Prec.Efficiency (%)",skip=25) #,stretch=.3) + +# grafico Prec.tot vs Prec.Intensity y Frequency: +scala.log=FALSE +x<-prec.wt.int.medio +y<-prec.wt.sum.norm.medio +my.xmax<-10 +my.ymax<-34 #ceiling(max(x,y)) +my.wts=c("W","C","NE","SW","NW") #,"N","E") +my.wts.col=c("blue","red","darkgreen","cyan3","darkgoldenrod3") #,"gray","purple") +my.wts=c("W","C","NE","SW","NW","N","E") +my.wts.col=c("blue","red","darkgreen","cyan3","darkgoldenrod3","gray","purple") +l=0;if(scala.log==TRUE)l=1 +x<-l+x;y<-l+y + +tiff(paste("C:/nicola/precipitaciones/weather_types/Ip02/Figure8.tif",sep=""),res=50) +if(scala.log==TRUE)plot(x,y,log="xy",type="n",axes=F,xlim=1+c(0,my.xmax),ylim=1+c(0,my.ymax),xlab="",ylab="",mgp=c(2,0,0)) # disegna il grafico vuoto +if(scala.log==FALSE)plot(x,y,type="n",axes=F,xlim=c(0,my.xmax),ylim=c(0,my.ymax),xlab="",ylab="",mgp=c(2,0,0)) # disegna il grafico vuoto +#title("Prec.Tot vs. 
Prec.Intensity",line=-0.1) +title(xlab="Precipitation Intensity (mm/day)",cex.sub=20,mgp=c(2,0,0)) +title(ylab="Percentage Contribution (%)",,mgp=c(2,0,0)) +axis(1, at=l+0:my.xmax,labels=0:my.xmax,cex.axis=.8,mgp=c(0,0.5,0)) +axis(2, at=l+0:my.ymax,labels=0:my.ymax,cex.axis=.8,mgp=c(0,0.5,0)) +#segments(l+0:my.xmax,l,l+0:my.xmax,l+my.ymax,col="gray") # linee verticali maglia +#segments(l,l+0:my.ymax,l+my.xmax,l+0:my.ymax,col="gray") # linee orizzontali maglia +# segments(l,l,l+my.xmax,l+my.xmax,col="gray") # bisettrice +# if(scala.log==TRUE){text(my.xmax*7/10,my.xmax*7/10,labels="Freq.= 1 dd/mth (3.3%)",srt=38,cex=.9)}else{text(8,8.5,labels="Freq.= 1 dd/mth (3.3%)",srt=14,cex=.9)} +# fr<-0.5;passo=0.2;for(i in seq(0,(my.xmax-l-passo),passo))segments(l+i,l+i*fr,l+i+passo,l+(i+passo)*fr,col="gray") # segmento per Freq=0.5 days/month +# if(scala.log==TRUE){text(l+8,l+4,labels="0.5 dd/m",srt=38,cex=.9)}else{text(9.7,5.5,labels="0.5 dd/m",cex=.9)} +# fr<-2;passo=0.2;for(i in seq(0,my.xmax-0.2,passo))segments(l+i,l+i*fr,l+i+passo,l+(i+passo)*fr,col="gray") +# if(scala.log==TRUE){text(l+6,l+12,labels="2 dd/m",srt=38,cex=.9)}else{text(10,20,labels="2 dd/m",cex=.9)} +# fr<-4;passo=0.2;for(i in seq(0,8.2,passo))segments(l+i,l+i*fr,l+i+passo,l+(i+passo)*fr,col="gray") +# if(scala.log==TRUE){text(l+4,l+16,labels="4 dd/m",srt=38,cex=.9)}else{text(8.5,34,labels="4 dd/m",cex=.9)} +# fr<-8;passo=0.2;for(i in seq(0,4,passo))segments(l+i,l+i*fr,l+i+passo,l+(i+passo)*fr,col="gray") +# if(scala.log==TRUE){text(l+4,l+16,labels="4 dd/m",srt=38,cex=.9)}else{text(4.5,34,labels="8 dd/m",cex=.9)} +points(x,y,cex=.3,pch=19) # puntini dei WTs +my.wts.num<-match(my.wts,WTs.type);n.wts<-length(my.wts) +for(wt in 1:n.wts){ + x2<-x[my.wts.num[wt],];y2<-y[my.wts.num[wt],] + points(x2[c(12,1,2)],y2[c(12,1,2)],cex=.7,pch=19,col=my.wts.col[wt]) # winter + points(x2[c(3,4,5)],y2[c(3,4,5)],cex=.7,pch=17,col=my.wts.col[wt]) # spring + points(x2[c(6,7,8)],y2[c(6,7,8)],cex=.7,pch=15,col=my.wts.col[wt]) # summer + points(x2[c(9,10,11)],y2[c(9,10,11)],cex=.5,pch=25,col=my.wts.col[wt],bg=my.wts.col[wt]) # autumn +} +#legend("topright",legend=my.wts,col=my.wts.col,pch=20,horiz=T) +xmin=6;ymin=31;passo=1 +text(xmin+2.1,ymin+3.1*passo,paste(my.wts,collapse=" ")) +text(xmin-0.6,ymin+1.7*passo,"Winter") +text(xmin-0.6,ymin+0.7*passo,"Spring") +text(xmin-0.6,ymin-0.3*passo,"Summer") +text(xmin-0.6,ymin-1.3*passo,"Autumn") +legend(xmin,ymin+3*passo,legend=rep("",n.wts),col=my.wts.col,pch=19,horiz=T,bty="n") +legend(xmin,ymin+2*passo,legend=rep("",n.wts),col=my.wts.col,pch=17,horiz=T,bty="n") +legend(xmin,ymin+passo,legend=rep("",n.wts),col=my.wts.col,pch=15,horiz=T,bty="n") +legend(xmin,ymin,legend=rep("",n.wts),col=my.wts.col,pch=25,pt.bg=my.wts.col,horiz=T,bty="n") +dev.off() + +# fit efficienza vs intensita': +x<-as.vector(prec.wt.int.medio) # as.vector() e'necessario solo per la regressione +y<-as.vector(prec.wt.eff.medio) +plot(x,y,cex=.3,pch=19,xlab="Prec.Intensity (mm/day)",ylab="Prec.Efficiency (%)") #,xaxs="i", yaxs="i") # xaxs="i" fa iniziare gli assi in 0,0 +#abline(lm(y~0+x),col="red",lwd=1.5) # regressione con retta +my.lm<-lm(y~x+I(x^2)) # per fare il fit polinomiale +my.lm<-lm(y~x) # per fare il fit lineare +my.x<-seq(0,9.2,0.1) +my.poly=my.lm[[1]][1]+my.lm[[1]][2]*my.x+my.lm[[1]][3]*(my.x^2) +points(my.x,my.poly, type="l", col="blue", lwd=1,cex=.5) +#my.poly<-function(my.x) my.lm[[1]][1]+my.lm[[1]][2]*my.x+my.lm[[1]][3]*(my.x^2) +#curve(my.poly,0,10) # per disegnare una funzione + + +# grafico intensita' vs efficienza: 
+my.wts=c("W","C","NE","SW","NW","N","E") +my.wts.col=c("blue","red","darkgreen","purple","darkgoldenrod3","cyan3","gray") +my.wts=c("C.W","C.SW","C.NW","C","W") +my.wts.col=c("blue","red","darkgreen","purple","darkgoldenrod3") + +x<-prec.wt.eff.medio +y<-prec.wt.int.medio +plot(x,y,type="n",ylab="Prec.Intensity (mm/day)",xlab="Prec.Efficiency (%)",xaxs="i", yaxs="i",ylim=c(0,9.5),xlim=c(0,75),yaxp=c(0,9,9)) # xaxs="i" fa iniziare gli assi in 0,0 +#title("Intensity vs. Efficiency") +points(x,y,cex=.3,pch=19) +my.wts.num<-match(my.wts,WTs.type);n.wts<-length(my.wts) +for(wt in 1:n.wts){ + x2<-x[my.wts.num[wt],];y2<-y[my.wts.num[wt],] + points(x2[c(12,1,2)],y2[c(12,1,2)],cex=.7,pch=19,col=my.wts.col[wt]) # winter + points(x2[c(3,4,5)],y2[c(3,4,5)],cex=.7,pch=17,col=my.wts.col[wt]) # spring + points(x2[c(6,7,8)],y2[c(6,7,8)],cex=.7,pch=15,col=my.wts.col[wt]) # summer + points(x2[c(9,10,11)],y2[c(9,10,11)],cex=.5,pch=25,col=my.wts.col[wt],bg=my.wts.col[wt]) # autumn +} +#legend("bottomright",legend=my.wts,col=my.wts.col,pch=20) +xmin=15;ymin=8;passo=0.3 +text(xmin+9,ymin+3.1*passo,"C.W C.SW C.NW C W",cex=.7) +text(xmin-6,ymin+1.7*passo,"Winter") +text(xmin-6,ymin+0.7*passo,"Spring") +text(xmin-6,ymin-0.3*passo,"Summer") +text(xmin-6,ymin-1.3*passo,"Autumn") +legend(xmin,ymin+3*passo,legend=rep("",5),col=my.wts.col,pch=19,horiz=T,bty="n") +legend(xmin,ymin+2*passo,legend=rep("",5),col=my.wts.col,pch=17,horiz=T,bty="n") +legend(xmin,ymin+passo,legend=rep("",5),col=my.wts.col,pch=15,horiz=T,bty="n") +legend(xmin,ymin,legend=rep("",5),col=my.wts.col,pch=25,pt.bg=my.wts.col,horiz=T,bty="n") + +# fit Prec.Tot vs Area: +my.wts=c("W","C","NE","SW","NW") #,"N","E") +my.wts.col=c("blue","red","darkgreen","cyan3","darkgoldenrod3") #,"cyan3","gray") +x<-area.wt.clim.threshold +y<-prec.wt.sum.norm.medio +plot(x,y,type="n",ylab="Percentage Contribution (%)",xlab="Area of influence (%)",xaxs="i", yaxs="i",ylim=c(-0.05,35),xlim=c(-0.01,1.01),axes=F) # xaxs="i" fa iniziare gli assi in 0,0 +axis(1, at=seq(0,1,0.1),labels=seq(0,100,10),cex.axis=.8,mgp=c(0,0.5,0)) +axis(2, at=seq(0,35,5),labels=seq(0,35,5),cex.axis=.8,mgp=c(0,0.5,0)) +#title("% Contribution vs % Area > 1mm.") +points(x,y,cex=.3,pch=19) +my.wts.num<-match(my.wts,WTs.type);n.wts<-length(my.wts) +for(wt in 1:n.wts){ + x2<-x[my.wts.num[wt],];y2<-y[my.wts.num[wt],] + points(x2[c(12,1,2)],y2[c(12,1,2)],cex=.7,pch=19,col=my.wts.col[wt]) # winter + points(x2[c(3,4,5)],y2[c(3,4,5)],cex=.7,pch=17,col=my.wts.col[wt]) # spring + points(x2[c(6,7,8)],y2[c(6,7,8)],cex=.7,pch=15,col=my.wts.col[wt]) # summer + points(x2[c(9,10,11)],y2[c(9,10,11)],cex=.5,pch=25,col=my.wts.col[wt],bg=my.wts.col[wt]) # autumn +} +#legend("topleft",legend=my.wts,col=my.wts.col,pch=20) + +xmin=0.2;ymin=30;passo=1 +text(xmin+0.15,ymin+3.1*passo,paste(my.wts,collapse=" ")) +text(xmin-0.06,ymin+1.7*passo,"Winter") +text(xmin-0.06,ymin+0.7*passo,"Spring") +text(xmin-0.06,ymin-0.3*passo,"Summer") +text(xmin-0.06,ymin-1.3*passo,"Autumn") +legend(xmin,ymin+3*passo,legend=rep("",5),col=my.wts.col,pch=19,horiz=T,bty="n") +legend(xmin,ymin+2*passo,legend=rep("",5),col=my.wts.col,pch=17,horiz=T,bty="n") +legend(xmin,ymin+passo,legend=rep("",5),col=my.wts.col,pch=15,horiz=T,bty="n") +legend(xmin,ymin,legend=rep("",5),col=my.wts.col,pch=25,pt.bg=my.wts.col,horiz=T,bty="n") + + + +save.image("C:/nicola/precipitaciones/weather_types/Ip02/Analisi_WTs_Ip02_v16_percentiles.RData") +#draw.ellipse(rep(0,my.max),rep(0,my.max),1:my.max,1:my.max,lty=3) #,border="gray") 
+#draw.circle(0,0,1,border=NULL,col=NA,lty=1,lwd=1) # symbols(0,0,circle=1,add=T) is not suitable because the circle is not proportional to the y axis
+#text(wt.n.days.mean[,mese],prec.wt.int.medio[,mese],WTs.type,pos=4) # draws all labels to the right of the points, but sometimes they overlap
+#pointLabel(wt.n.days.mean[,mese],prec.wt.int.medio[,mese],WTs.type,cex=.8) # two alternative ways to automate the labels, but neither works very well
+#install.packages("directlabels");library(directlabels);direct.label(xyplot(prec.wt.int.medio[,mese]~wt.n.days.mean[,mese],groups=WTs.type))
+#plot(prec.wt.sum.norm.medio,area.wt.clim.threshold,cex=.3,pch=19,xlab="Prec.Sum (mm)",ylab="% Area > 1 mm")
+
+
+library(plotrix)
+
+neutral.city=199;n.neutral.city=18
+tomb=188;n.tomb=4
+temple=166;n.temple=4
+dungeon=155;n.dungeon=10
+sorcery=103;n.sorcery=5
+mtower=7;n.mtower=7
+plain=111;n.plain=120-n.mtower-n.sorcery-n.dungeon-n.temple-n.tomb-n.neutral.city
+
+chaos=904;n.chaos=5
+mithril=955;n.mithril=4
+silver=911;n.silver=8
+gold=922;n.gold=6
+mountain=999;n.mountain=55-n.mithril-n.chaos-n.silver-n.gold
+
+tomb2=288;n.tomb2=4
+temple2=266;n.temple2=4
+lair=255;n.lair=30
+nature=202;n.nature=5
+forest=222;n.forest=80-n.lair-n.nature-n.temple2-n.tomb2
+
+sea.river=1;n.sea.river=25
+sea.ruin=5;n.sea.ruin=16
+sea.city=9;n.sea.city=10
+sea.sorcery=3;n.sea.sorcery=3
+sea.nature=2;n.sea.nature=3
+sea.chaos=4;n.sea.chaos=3
+sea=0;n.sea=150-n.sea.chaos-n.sea.nature-n.sea.sorcery-n.sea.city-n.sea.ruin-n.sea.river
+
+lun=15 # 27 or 21
+alt=15
+
+n.cas<-alt*lun
+#land<-matrix(sample(c(999,0),n.cas,replace=FALSE,prob=c(0.6,0.4)),alt,lun)
+land<-matrix(sample(c(rep(plain,n.plain),rep(gold,n.gold),rep(sea.ruin,n.sea.ruin),rep(sorcery,n.sorcery),rep(sea.sorcery,n.sea.sorcery),rep(sea.nature,n.sea.nature),rep(mountain,n.mountain),rep(chaos,n.chaos),rep(mithril,n.mithril),rep(forest,n.forest),
+              rep(lair,n.lair),rep(silver,n.silver),rep(neutral.city,n.neutral.city),rep(nature,n.nature),rep(tomb2,n.tomb2),rep(tomb,n.tomb),rep(temple2,n.temple2),
+              rep(temple,n.temple),rep(sea.river,n.sea.river),rep(sea.city,n.sea.city),rep(dungeon,n.dungeon),rep(mtower,n.mtower),rep(sea.chaos,n.sea.chaos),rep(sea,n.sea)),n.cas,replace=FALSE),alt,lun) # sample without replacement (simulates drawing 375 tiles from a container with 400 tiles)
+land
+
+color2D.matplot(land)
+
+
+
+
+
+plain=111;n.plain=93
+desert=333;n.desert=45
+forest=222;n.forest=60
+mountain=7777;n.mountain=54
+sea=0;n.sea=126
+
+
+lun=21 # 15, 18 or 21
+alt=15
+
+n.cas<-alt*lun
+land<-matrix(sample(c(rep(plain,n.plain),rep(mountain,n.mountain),rep(forest,n.forest),rep(desert,n.desert),rep(sea,n.sea)),n.cas,replace=FALSE),alt,lun) # sample without replacement (simulates drawing 375 tiles from a container with 400 tiles)
+land
+
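A sketch of the same draw-without-replacement idea wrapped in a reusable helper, using the tile variables defined just above (the function name sample.map is hypothetical):

    # build a height x width map by drawing tiles without replacement;
    # 'tiles' is a vector of tile codes, 'counts' the number of copies of each:
    sample.map <- function(tiles, counts, height, width) {
      pool <- rep(tiles, counts)                   # the full tile container
      stopifnot(length(pool) >= height * width)    # enough tiles to draw from
      matrix(sample(pool, height * width, replace = FALSE), height, width)
    }
    land <- sample.map(c(plain, forest, sea), c(n.plain, n.forest, n.sea), 15, 15)
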
read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1] +#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing! +#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) + +### netcdf-3 (1GB): +# [or netcdf-4 if change name in prlr_200706.nc] +lat=seq(-90,90,0.75) +# load 3 chunks (the minimum number possible) from gpfs:: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) +# load all chunks in gpfs: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=gpfs.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1)) +# load all chunks in moore: +time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=moore.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1)) + +# load 3 chunks from esnas: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1)) +# load all chunks from esnas: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1)) + +# netcdf-3 (10 GB): +#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) +#lat=seq(-90,90,0.75) + +#.RData (1 GB): (must add +20% loading time because it is a file of 820 MB) +#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData")) +# in esnas: +#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData")) + +# save chunks: +#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData')) +#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData')) + +#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h") +#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData')) +#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt')) + + + + + diff --git a/lsf/diagnostics.R~ b/lsf/diagnostics.R~ new file mode 100644 index 0000000000000000000000000000000000000000..2ac7b06ca7db5c4dc64174aba93df1f2d71eb7e3 --- /dev/null +++ b/lsf/diagnostics.R~ @@ -0,0 +1,48 @@ +######################################################################################### +# Sub-seasonal Skill Scores # +######################################################################################### + +library(s2dverification) +outdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" + +chunk <- as.integer(commandArgs(TRUE)[1]) +#chunk=1 + +# netcdf-3 (10 MB): +#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1] +#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing! 
+#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) + +### netcdf-3 (1GB): +# [or netcdf-4 if change name in prlr_200706.nc] +lat=seq(-90,90,0.75) +# load 3 chunks (the minimum number possible): +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) +# load all chunks: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1)) +# load 3 chunks from esnas: +time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1)) +# load all chunks from esnas: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1)) + +# netcdf-3 (10 GB): +#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) +#lat=seq(-90,90,0.75) + +#.RData (1 GB): (must add +20% loading time because it is a file of 820 MB) +#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData")) +# in esnas: +#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData")) + +# save chunks: +#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData')) +#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData')) + +#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h") +#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData')) +#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt')) + + + + + diff --git a/lsf/diagnostics_v2.R b/lsf/diagnostics_v2.R new file mode 100644 index 0000000000000000000000000000000000000000..0c1cdce7d680be584a013e143e83f7ab58689641 --- /dev/null +++ b/lsf/diagnostics_v2.R @@ -0,0 +1,57 @@ +######################################################################################### +# Sub-seasonal Skill Scores # +######################################################################################### + +library(s2dverification) +outdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" + +gpfs.path <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc" +moore.path <- "/scratch/Earth/ncortesi/prlr_$YEAR$$MONTH$.nc" +esnas.path <- "/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc" +chunk <- as.integer(commandArgs(TRUE)[1]) +#chunk=1 + +# netcdf-3 (10 MB): +#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1] +#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing! 
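The latmin/latmax pair above is how a chunk index selects its latitude band; a minimal sketch of that mapping, assuming the 0.75-degree grid defined in the script (the chunk value is an arbitrary example):

    lat <- seq(-90, 90, 0.75)               # the grid used by these benchmarks
    chunk <- 5                              # example chunk index
    band <- c(lat[chunk], lat[chunk + 2])   # the latmin/latmax band read by this chunk
    cat("chunk", chunk, "covers latitudes", band[1], "to", band[2], "\n")
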
diff --git a/lsf/diagnostics_v2.R b/lsf/diagnostics_v2.R
new file mode 100644
index 0000000000000000000000000000000000000000..0c1cdce7d680be584a013e143e83f7ab58689641
--- /dev/null
+++ b/lsf/diagnostics_v2.R
@@ -0,0 +1,57 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+
+library(s2dverification)
+outdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+
+gpfs.path <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc"
+moore.path <- "/scratch/Earth/ncortesi/prlr_$YEAR$$MONTH$.nc"
+esnas.path <- "/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc"
+chunk <- as.integer(commandArgs(TRUE)[1])
+#chunk=1
+
+# netcdf-3 (10 MB):
+#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1]
+#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing!
+#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+
+### netcdf-3 (1GB):
+# [or netcdf-4 if the file name is changed to prlr_200706.nc]
+lat=seq(-90,90,0.75)
+# load 3 chunks (the minimum number possible) from gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+# load all chunks in gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=gpfs.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from moore's scratch to moore:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=moore.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from esnas to moore:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=esnas.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+
+
+# load 3 chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1))
+# load all chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1))
+
+# netcdf-3 (10 GB):
+#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+#lat=seq(-90,90,0.75)
+
+#.RData (1 GB): (add ~20% to the loading time because the file is 820 MB)
+#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData"))
+# in esnas:
+#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData"))
+
+# save chunks:
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData'))
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData'))
+
+#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")
+#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData'))
+#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt'))
+
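A short note on the $YEAR$$MONTH$ tokens used in the paths above: as far as these scripts rely on it, Load() expands such placeholders from the requested start dates, so one templated path covers every monthly file. A hedged sketch of the pattern:

    # template path: Load() substitutes $YEAR$ and $MONTH$ from 'sdates'
    esnas.path <- "/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc"
    # with sdates = '20070501' the file actually opened is .../prlr_200705.nc
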
diff --git a/lsf/example_test_v9.txt b/lsf/example_test_v9.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bfacaf27ef941c83b1708380d7d89072843ff6c3
--- /dev/null
+++ b/lsf/example_test_v9.txt
@@ -0,0 +1,17 @@
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 1
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 2
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 3
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 4
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 5
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 6
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 7
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 8
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 9
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 10
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 11
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 12
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 13
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 14
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 15
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 16
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 17
diff --git a/lsf/load_netcdf.R b/lsf/load_netcdf.R
new file mode 100644
index 0000000000000000000000000000000000000000..cc10194ceb701b6a8e9a84b84fdfbcd173134ff2
--- /dev/null
+++ b/lsf/load_netcdf.R
@@ -0,0 +1,6 @@
+library(s2dverification)
+
+# load a 1 GB NetCDF-3 file from esnas:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc")), obs=NULL,sdates='20070501', nleadtime=1, output='lonlat', nprocs=1))
+
+write.table(time[3], file=paste0("load_netcdf_total_time_",round(time[3],2),"_seconds.txt"))
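system.time() returns a vector of several timings, which is why the scripts index time[3]; a minimal sketch of extracting just the elapsed seconds for the output file name (the Sys.sleep call is a stand-in for the Load() call):

    time <- system.time(Sys.sleep(0.1))        # placeholder for the expression to benchmark
    elapsed <- round(time[["elapsed"]], 2)     # same value as time[3]
    write.table(elapsed, file = paste0("load_netcdf_total_time_", elapsed, "_seconds.txt"))
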
diff --git a/lsf/load_netcdf.job b/lsf/load_netcdf.job
new file mode 100644
index 0000000000000000000000000000000000000000..ccf24c9995977028bf8eb4843f3ecd0748d23b0d
--- /dev/null
+++ b/lsf/load_netcdf.job
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+#BSUB -J parallel
+#BSUB -oo parallel-%J.out
+#BSUB -eo parallel-%J.err
+
+#############################################################
+# The line below assigns the job to the SMP queue,          #
+# so the job will run on the SMP machine:                   #
+#############################################################
+
+#BSUB -q smp
+
+#############################################################
+# Set the total computation time of the parallel job.       #
+# The maximum is 48 hours (syntax: HH:MM), but a lower      #
+# value means a faster queue! So, try not to set it much    #
+# higher than the running time of a single job:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# used by your jobs during their sequential execution:      #
+#############################################################
+
+#BSUB -M 10000
+
+#############################################################
+# Allocate a number of processors (cores) for this job.     #
+# It is also the maximum number of jobs running at the      #
+# same time; jobs exceeding this number will start only     #
+# when the previous jobs have finished computing.           #
+# The maximum number of cores a user can allocate on the    #
+# SMP machine is 80. However, this number cannot be higher  #
+# than the total RAM of SMP (2000 GB) divided by the RAM    #
+# allocated to one core (rounded down).                     #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# to your job.                                              #
+#############################################################
+
+#BSUB -n 1
+
+nCores=1 # same as the number of cores above
+
+#############################################################
+# Include the command below only if you need to write data  #
+# to /esnas. If not, comment it out (for reading from       #
+# /esnas this command is not necessary):                    #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are loaded here in case any of them is missing:      #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of the script to run; if no path is          #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="load_netcdf.R"
+
+#/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+Rscript $diagnostic
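The job scripts build their GREASY task lists in bash; an equivalent sketch in R, in case one prefers generating the list from a script (file and chunk values follow the example_test_v9.txt pattern above):

    # write one 'Rscript <script> <chunk>' line per chunk into the task list:
    first.chunk <- 1
    last.chunk  <- 17
    tasks <- paste("Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R", first.chunk:last.chunk)
    writeLines(tasks, "./diagnostics.txt")
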
diff --git a/lsf/old/diagnostics_MN.job b/lsf/old/diagnostics_MN.job
new file mode 100644
index 0000000000000000000000000000000000000000..d7862dd2f68df194254960457916ad9315bb6299
--- /dev/null
+++ b/lsf/old/diagnostics_MN.job
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#BSUB -J split_job
+#BSUB -oo diagnostics-%J.out
+#BSUB -eo diagnostics-%J.err
+
+##################################################
+# Name of your script to split in chunks:
+##################################################
+
+script="./script_MN.R"
+
+##################################################
+# Number of chunks (tasks) in which the job is split,
+# e.g. if you have to run the same script 12 times changing
+# the variable "month" from 1 to 12, set nChunks=12.
+# If you are computing a Skill Score using hindcast data
+# with 241 latitude values, set nChunks=241.
+##################################################
+
+nChunks=2
+
+##################################################
+# Maximum number of tasks able to run at the same time
+# (same as the number of cores to reserve), plus 1 task
+# used for data communication only.
+# It should be equal to or lower than nChunks, e.g.
+# if nChunks=241, you can set it to 242, or to 122 / 82 / 62 / 49
+# if you want to reserve fewer cores and wait for each batch of
+# tasks to finish before starting the following one. In this case,
+# to end sooner it is better to leave only a few cores unassigned.
+##################################################
+
+#BSUB -n 3
+
+##################################################
+# Maximum execution time of the job (wall clock limit in hh:mm):
+##################################################
+
+#BSUB -W 01:00
+
+taskList=./diagnostics_MN.txt
+
+echo "" > $taskList
+
+for cnk in $(seq 1 $nChunks); do
+  echo "Rscript" $script $cnk >> $taskList
+done
+
+# export GREASY_LOGFILE=diagnostics.log
+# export GREASY_NWORKERS=4
+# export GREASY_NODELIST=node1,node2,node3
+
+##################################################
+# Run the job splitting it in many sequential tasks in parallel:
+##################################################
+
+/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+
diff --git a/lsf/old/diagnostics_cluster.job b/lsf/old/diagnostics_cluster.job
new file mode 100644
index 0000000000000000000000000000000000000000..37e9b61953097c6e4bde995dc3de82b3bbeca2f3
--- /dev/null
+++ b/lsf/old/diagnostics_cluster.job
@@ -0,0 +1,31 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -J diagnostic
+#SBATCH -o diagnostic.out
+#SBATCH -e diagnostic.err
+
+# set the maximum execution time of the diagnostic:
+#SBATCH -t 12:00:00
+
+# Set the name of your script to run:
+diagnostic="script_cluster.R"
+
+# Set the total number of chunks to employ:
+nChunks=2
+
+# Set the maximum number of processes to run in parallel:
+nCores=4
+
+# run a process (thread) for each chunk in the background:
+for ARG in $(seq 1 $nChunks); do
+  Rscript $diagnostic $ARG &
+  nThreads=$(($nThreads+1))
+  if [ "$nThreads" -ge $nCores ]; then
+    wait # wait until the first $nCores chunks have finished before executing the next $nCores
+    nThreads=0
+  fi
+done
+
+# wait until all chunks have been computed, then collect the results of each chunk and merge them into the file 'diagnostic_output.RData':
+wait
+Rscript -e 'nChunks <- as.integer(commandArgs(TRUE)[1])' -e 'for(cnk in 1:nChunks){' -e 'load(paste0(getwd(),"/output_",cnk,".RData"))' -e 'if(cnk==1) output <- array(NA, c(nChunks, length(var)))' -e 'output[cnk,] <- var' -e 'file.remove(paste0(getwd(),"/output_",cnk,".RData"))}' -e 'save(output, file="diagnostic_output.RData")' $nChunks
diff --git a/lsf/old/script_MN.R b/lsf/old/script_MN.R
new file mode 100644
index 0000000000000000000000000000000000000000..631adea4f0913b2bdc149c754177f81c07609a80
--- /dev/null
+++ b/lsf/old/script_MN.R
@@ -0,0 +1,19 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+
+library(s2dverification)
+workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+
+chunk <- as.integer(commandArgs(TRUE)[1])
+#chunk=1
+
+lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1]
+lat=rev(lat) # to go in increasing order of latitudes instead of decreasing!
+time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+
+
+#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+#lat=seq(-90,90,0.75)
+
+save(time, file=paste0(workdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData'))
diff --git a/lsf/old/script_cluster.R b/lsf/old/script_cluster.R
new file mode 100644
index 0000000000000000000000000000000000000000..176fb8eb2c9602b4feac26dca55f5620269dc80c
--- /dev/null
+++ b/lsf/old/script_cluster.R
@@ -0,0 +1,19 @@
+library(s2dverification)
+
+chunk <- as.integer(commandArgs(TRUE)[1])
+
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+
+domain <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+data <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 3, output = 'lonlat', latmin = domain$lat[chunk+1], latmax = domain$lat[chunk], nprocs=1)$obs
+data <- data[,,,,1,]
+
+n.lat <- 1
+my.RMS <- array(NA, c(n.lat, n.lon))
+
+# the output variable must be saved under the name 'var', in the file 'output_' + chunk number + '.RData':
+var <- RMS(data,data+1)
+save(var, file=paste0("/scratch/Earth/ncortesi/output_",chunk,".RData"))
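The single-line Rscript -e merge command in diagnostics_cluster.job above is hard to read; an expanded sketch of the same collection step, assuming each chunk saved a variable 'var' in output_<chunk>.RData (note that the original one-liner saved 'var' instead of the merged 'output', which looks unintended and is fixed here):

    nChunks <- 2
    for (cnk in 1:nChunks) {
      load(paste0(getwd(), "/output_", cnk, ".RData"))           # restores 'var'
      if (cnk == 1) output <- array(NA, c(nChunks, length(var)))
      output[cnk, ] <- var                                       # one row per chunk
      file.remove(paste0(getwd(), "/output_", cnk, ".RData"))
    }
    save(output, file = "diagnostic_output.RData")
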
diff --git a/lsf/parallel_MN.job b/lsf/parallel_MN.job
new file mode 100644
index 0000000000000000000000000000000000000000..106baf996af8854a950381f9a02caa550b2c0147
--- /dev/null
+++ b/lsf/parallel_MN.job
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+#BSUB -J diagnostic
+#BSUB -oo greasy-%J.out
+#BSUB -eo greasy-%J.err
+
+##################################################
+# These modules should already be loaded in your session;
+# they are included here in case any of them didn't load correctly:
+##################################################
+
+module load R/3.2.2 gcc/4.7.2 NETCDF/4.1.3 intel/13.0.1 openmpi/1.8.1 HDF5/1.8.10 UDUNITS/2.1.24 CDO/1.7.0 NCO
+
+##################################################
+# Include the command below only if you need to write to /esnas;
+# if not, comment it out (for reading from /esnas it is not necessary):
+##################################################
+
+# newgrp Earth
+
+##################################################
+# Include this line if you want to run the job on the SMP machine,
+# or comment it with ### if you want to run the job on MareNostrum:
+##################################################
+
+#BSUB -q smp
+
+##################################################
+# Name of your script to split in chunks:
+##################################################
+
+script="./diagnostics.R"
+
+##################################################
+# First and last chunks (tasks) in which the job is split.
+# Examples:
+# - If you are computing a Skill Score using hindcast data
+#   with 241 latitude values, set firstChunk=1 and lastChunk=241
+# - If you want to run the same script 12 times changing variable
+#   "month" from 1 to 12, set firstChunk=1 and lastChunk=12
+# - If you want to run the same script for a sequence of years, e.g.
+#   from 1980 to 2015, set firstChunk=1980 and lastChunk=2015
+##################################################
+
+firstChunk=1
+lastChunk=1
+
+##################################################
+# Number of cores to reserve. It should be equal to the number
+# of chunks (lastChunk - firstChunk + 1), plus 1 core, which is
+# used only for data communication between tasks.
+# For example, if there is a total of 241 chunks, set it to 242.
+# You can also set it to a value lower than the number of chunks
+# (for example if the number of chunks is higher than the number of cores);
+# in this case, the chunks that are not executed immediately are
+# put in an internal queue and executed later, when cores become
+# available. Consequently, running times will increase.
+##################################################
+
+#BSUB -n 2
+
+##################################################
+# Set the maximum execution time of the job (in hh:mm).
+# The upper limit is 48 hours, but a lower value means a faster queue,
+# so try to set it only a bit higher than the running time of one chunk.
+##################################################
+
+#BSUB -W 10:00
+
+##################################################
+# Create a .txt file with the list of tasks to run:
+##################################################
+
+taskList=./diagnostics.txt
+
+echo "" > $taskList # new empty file; if it already exists, it is overwritten
+
+for cnk in $(seq $firstChunk $lastChunk); do
+  echo "Rscript" $script $cnk >> $taskList
+done
+
+#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))"
+
+##################################################
+# Run the job splitting it in many sequential tasks in parallel:
+##################################################
+
+/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+
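A quick R sketch of the chunk/core bookkeeping described in the comment boxes above (not part of the job script; values follow the 241-latitude example):

    first.chunk <- 1
    last.chunk  <- 241
    n.chunks <- last.chunk - first.chunk + 1
    n.cores  <- n.chunks + 1    # one extra core for GREASY's communication task
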
diff --git a/lsf/parallel_SMP.R b/lsf/parallel_SMP.R
new file mode 100644
index 0000000000000000000000000000000000000000..ac8417c9174b8bb6d5bff3f083cb711e1df05b80
--- /dev/null
+++ b/lsf/parallel_SMP.R
@@ -0,0 +1,56 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+
+library(s2dverification)
+outdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+
+gpfs.path <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc"
+moore.path <- "/scratch/Earth/ncortesi/prlr_$YEAR$$MONTH$.nc"
+esnas.path <- "/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc"
+chunk <- as.integer(commandArgs(TRUE)[1])
+#chunk=1
+
+# netcdf-3 (10 MB):
+#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1]
+#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing!
+#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+
+### netcdf-3 (1GB):
+# [or netcdf-4 if the file name is changed to prlr_200706.nc]
+lat=seq(-90,90,0.75)
+# load 3 chunks (the minimum number possible) from gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+# load all chunks in gpfs:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=gpfs.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from moore's scratch to moore:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=moore.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from esnas to moore:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=esnas.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+
+# load 3 chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1))
+# load all chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1))
+
+# netcdf-3 (10 GB):
+#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+#lat=seq(-90,90,0.75)
+
+#.RData (1 GB): (add ~20% to the loading time because the file is 820 MB)
+#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData"))
+# in esnas:
+#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData"))
+
+# save chunks:
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData'))
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData'))
+
+#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")
+#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData'))
+#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt'))
+
diff --git a/lsf/parallel_SMP.job b/lsf/parallel_SMP.job
new file mode 100644
index 0000000000000000000000000000000000000000..fe9a72f733af97d86510f6145e6df28e8b73ecf7
--- /dev/null
+++ b/lsf/parallel_SMP.job
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+#BSUB -J parallel
+#BSUB -oo parallel-%J.out
+#BSUB -eo parallel-%J.err
+
+#############################################################
+# The line below assigns the job to the SMP queue,          #
+# so the job will run on the SMP machine:                   #
+#############################################################
+
+#BSUB -q smp
+
+#############################################################
+# Set the total computation time of the parallel job.       #
+# The maximum is 48 hours (syntax: HH:MM), but a lower      #
+# value means a faster queue! So, try not to set it much    #
+# higher than the running time of a single job:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# used by your jobs during their sequential execution:      #
+#############################################################
+
+#BSUB -M 65000
+
+#############################################################
+# Allocate a number of processors (cores) for this job.     #
+# It is also the maximum number of jobs running at the      #
+# same time; jobs exceeding this number will start only     #
+# when the previous jobs have finished computing.           #
+# The maximum number of cores a user can allocate on the    #
+# SMP machine is 80. However, this number cannot be higher  #
+# than the total RAM of SMP (2000 GB) divided by the RAM    #
+# allocated to one core (rounded down).                     #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# to your job.                                              #
+#############################################################
+
+#BSUB -n 4
+
+nCores=4 # same as the number of cores above
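As a quick R sketch of the RAM-per-core arithmetic described in the comment box above (an aside, not part of the job script):

    total.ram.gb    <- 2000                             # total RAM of the SMP machine
    ram.per.core.gb <- 50                               # RAM requested per core
    max.cores <- floor(total.ram.gb / ram.per.core.gb)  # 40, as in the example above
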
+
+#############################################################
+# Include the command below only if you need to write data  #
+# to /esnas. If not, comment it out (for reading from       #
+# /esnas this command is not necessary):                    #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are loaded here in case any of them is missing:      #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of the script to run; if no path is          #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="../weather_regimes_v35.R"
+
+#############################################################
+# First and last chunks (tasks) in which the job is split.
+# Examples:
+# - If you are computing a Skill Score using hindcast data
+#   with 241 latitude values, set firstChunk=1 and lastChunk=241
+# - If you want to run the same script 12 times changing variable
+#   "month" from 1 to 12, set firstChunk=1 and lastChunk=12
+# - If you want to run the same script for a sequence of years, e.g.
+#   from 1980 to 2015, set firstChunk=1980 and lastChunk=2015
+#############################################################
+
+firstChunk=1
+lastChunk=1
+
+#############################################################
+# Name of the .txt file with the list of tasks to run:
+#############################################################
+
+taskList=./parallel.txt
+
+#############################################################
+# Create the .txt file with the task list:
+#############################################################
+
+echo "" > $taskList # new empty file; if it already exists, it is overwritten
+
+for cnk in $(seq $firstChunk $lastChunk); do
+  echo "Rscript" $diagnostic $cnk >> $taskList
+done
+
+#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))"
+
+##############################################################
+# Run the job splitting it in many sequential tasks, one for #
+# each different set of values of the variables above, in    #
+# groups of tasks running at the same time:                  #
+##############################################################
+
+#/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+Rscript $diagnostic $firstChunk
So, try not to set it much    #
+# higher than the running time of a single job:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value should match the peak RAM    #
+# used by your jobs during their sequential execution:      #
+#############################################################
+
+#BSUB -M 65000
+
+#############################################################
+# Allocate a number of processors (cores) to this job.      #
+# It is also the maximum number of jobs running at the      #
+# same time; jobs exceeding this number will start only     #
+# when the previous jobs have finished computing.           #
+# The maximum number of cores a user can allocate on the    #
+# SMP machine is 80. However, this number cannot exceed     #
+# the total RAM of SMP (2000 GB) divided by the RAM         #
+# allocated to one core (rounded down).                     #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# to your job.                                              #
+#############################################################
+
+#BSUB -n 4
+
+nCores=4 # same as the number of cores above
+
+#############################################################
+# Include the command below only if you need to write data  #
+# to /esnas; if not, leave it commented out (reading from   #
+# /esnas does not require it):                              #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session;   #
+# they are listed here in case one of them was not:         #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="../weather_regimes_v35.R"
+
+#############################################################
+# first and last chunks (tasks) in which the job is split.
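+# Each task is one line of the form 'Rscript <script> <chunk>', so the
+# chunk number reaches the R script as commandArgs(TRUE)[1]. E.g., with
+# firstChunk=1 and lastChunk=3 the task list built below would contain:
+#   Rscript ../weather_regimes_v35.R 1
+#   Rscript ../weather_regimes_v35.R 2
+#   Rscript ../weather_regimes_v35.R 3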
+# Examples: +# - If you are computing a Skill Score using hindcast data +# with 241 latitude values, set fistChunk=1 and lastChunk=241 +# - If you want to run the same script 12 times changing variable +# "month" from 1 to 12, set firstChunk=1 and lastChunk=12 +# - If you want to run the same script for a sequence of years, i.e: +# from 1980 to 2015, set firstChunk=1950 and lastChunk=2015 +############################################################# + +firstChunk=1 +lastChunk=1 + +############################################################# +# Name of the .txt file with the list of tasks to run: +############################################################# + +taskList=./parallel.txt + +############################################################# +# create the .txt file with the task list: +############################################################# + +echo "" > $taskList # new void file; if already existing, delete it + +for cnk in $(seq $firstChunk $lastChunk); do + echo "Rscript" $diagnostic $cnk >> $taskList +done + +#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))" + +############################################################## +# Run the job splitting it in many sequential tasks, one for # +# each different set of values of the 3 above variables, and # +# in group of jobs at the same time: # +############################################################## + +/apps/GREASY/2.1.2.1/bin/greasy $taskList + +#Rscript $diagnostic $fistChunk diff --git a/lsf/parallel_SMP_v2.job~ b/lsf/parallel_SMP_v2.job~ new file mode 100644 index 0000000000000000000000000000000000000000..3261589471db9eca921cb244f8c9374b482aa175 --- /dev/null +++ b/lsf/parallel_SMP_v2.job~ @@ -0,0 +1,112 @@ +#!/bin/bash + +#BSUB -J parallel +#BSUB -oo parallel-%J.out +#BSUB -eo parallel-%J.err + +############################################################# +# Line below specify to assign the job to the SMP queue, # +# and consequently the job will run in the SMP machine: # +############################################################# + +#BSUB -q smp + +############################################################# +# Set the total computation time of the parallel job. # +# Time max is 48 hours (syntax: HH:MM), but a lower value # +# means a faster queue! So, try to set it not too much # +# higher than the running time of one job only: # +############################################################# + +#BSUB -W 10:00 + +############################################################# +# Allocate the desired quantity of RAM (in MB) for each # +# processor (core). This value corresponds to the peak RAM # +# utilized by your jobs during their sequential execution: # +############################################################# + +#BSUB -M 65000 + +############################################################# +# Allocate a number of processors (cores) to this job. # +# it is also equal to the maximum number of jobs running at # +# the same time; the jobs exceeding this number will start # +# only when the previous jobs have finished computing. # +# The maximum number of cores that can be allocated by a # +# user on the SMP machine is 80. However, this number # +# cannot be higher than the total RAM of SMP (2000 GB) # +# divided by the RAM allocated to one core (rounded down). 
# +# For example, if you allocated 50 GB/core in the previous # +# line, you can't allocate more than 2000 / 50 = 40 cores # +# to your job. # +############################################################# + +#BSUB -n 4 + +nCores=4 # same as the number of cores above + +############################################################# +# inlcude the command below only if you need to write data # +# in /esnas. If not, comment it (while for reading from # +# /esnas this command is not necessary): # +############################################################# + +### newgrp Earth + +############################################################# +# These modules should already been loaded in your session. # +# They are provided here in case someone wasn't loaded: # +############################################################# + +module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS + +############################################################# +# Set the name of your script to run; if no path is # +# provided, it is assumed to be in the directory where # +# the 'bsub' command is executed: # +############################################################# + +diagnostic="../weather_regimes_v35.R" + +############################################################# +# first and last chunks (tasks) in which the job is split. +# Examples: +# - If you are computing a Skill Score using hindcast data +# with 241 latitude values, set fistChunk=1 and lastChunk=241 +# - If you want to run the same script 12 times changing variable +# "month" from 1 to 12, set firstChunk=1 and lastChunk=12 +# - If you want to run the same script for a sequence of years, i.e: +# from 1980 to 2015, set firstChunk=1950 and lastChunk=2015 +############################################################# + +firstChunk=1 +lastChunk=1 + +############################################################# +# Name of the .txt file with the list of tasks to run: +############################################################# + +taskList=./parallel.txt + +############################################################# +# create the .txt file with the task list: +############################################################# + +echo "" > $taskList # new void file; if already existing, delete it + +for cnk in $(seq $firstChunk $lastChunk); do + echo "Rscript" $diagnostic $cnk >> $taskList +done + +#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))" + +############################################################## +# Run the job splitting it in many sequential tasks, one for # +# each different set of values of the 3 above variables, and # +# in group of jobs at the same time: # +############################################################## + +/apps/GREASY/2.1.2.1/bin/greasy $taskList + +Rscript $diagnostic $fistChunk diff --git a/merge_chunks.R b/merge_chunks.R new file mode 100644 index 0000000000000000000000000000000000000000..5b4079354eb43a56e55839beaec6db3c64ccf200 --- /dev/null +++ b/merge_chunks.R @@ -0,0 +1,20 @@ +# +# script to read the output file of each chunk and merge them in one array only +# + +output <- commandArgs(TRUE)[1] # suffix used for the name of the output files with the results of the calculation of each chunk +chunks <- as.integer(commandArgs(TRUE)[2]) # total number of chunks used + +workdir <- "/scratch/Earth/ncortesi" # path of the output files 
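+# Minimal sketch (an assumption, not part of this repo) of how each chunk's
+# diagnostic script is expected to save its result, so that the loop below
+# can find and merge it; the object name 'my.RMS' and the file-name pattern
+# are taken from the loop below, while the computation itself is a placeholder:
+#
+#   chunk <- as.integer(commandArgs(TRUE)[1])  # chunk index passed by the job script
+#   my.RMS <- rnorm(100)                       # placeholder for the real diagnostic
+#   save(my.RMS, file=paste0(workdir, "/", output, "_", chunk, ".RData"))
+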
+#save(output,file=paste0(workdir,"/output_file_",output,".RData"))
+
+for(c in 1:chunks){
+  load(paste0(workdir,"/",output,"_",c,".RData")) # load the result of the analysis of a single chunk
+
+  if(c==1) RMS <- array(NA, c(chunks,length(my.RMS)))
+  RMS[c,] <- my.RMS # fill a matrix with the results of all chunks
+
+  file.remove(paste0(workdir,"/",output,"_",c,".RData"))
+}
+
+save(RMS,file=paste0(workdir,"/RMS.RData"))
diff --git a/old/ColorBarCustom.R b/old/ColorBarCustom.R
new file mode 100644
index 0000000000000000000000000000000000000000..77849c54f62518b91f8692569e97c06d3e3a578c
--- /dev/null
+++ b/old/ColorBarCustom.R
@@ -0,0 +1,37 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: like ColorBar(), but the user can specify exactly which tick positions and labels to show in the legend (options 'my.ticks' and 'my.labels') instead of the predefined values.
+# It is especially useful when you have to publish a chart for a paper/report and want the legend to show only the values you choose, instead of those selected automatically by ColorBar().
+# I/O: as for ColorBar()
+# Assumptions: none
+# Branch: general
+
+ColorBarCustom <- function (brks, cols = NULL, vert = TRUE, subsampleg = 1, cex = 1, my.ticks, my.labels)
+{
+  if (is.null(cols) == TRUE) {
+    nlev <- length(brks) - 1
+    cols <- rainbow(nlev)
+  }
+  else {
+    if (length(cols) != (length(brks) - 1)) {
+      stop("Inconsistent colour levels / list of colours")
+    }
+  }
+  if (vert) {
+    par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1,
+        0), las = 1, cex = 1.2)
+    image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE,
+        col = cols, xlab = "", ylab = "")
+    box()
+    axis(4, at = my.ticks, tick = TRUE, labels = my.labels, cex.axis = cex)
+  }
+  else {
+    par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3,
+        0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2)
+    image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE,
+        col = cols, xlab = "", ylab = "")
+    box()
+    axis(1, at = my.ticks, labels = my.labels, cex.axis = cex)
+  }
+}
diff --git a/old/ColorBarCustom.R~ b/old/ColorBarCustom.R~
new file mode 100644
index 0000000000000000000000000000000000000000..ea0e84b2f55a5a76007252bfe5ac3c2d46c7224b
--- /dev/null
+++ b/old/ColorBarCustom.R~
@@ -0,0 +1,35 @@
+# Like ColorBar(), but the user can specify a subset of labels (option 'subset') to be shown in the legend.
+# Useful when you have to publish a chart for a paper/report and want the legend to show only the values you choose, instead of those selected automatically by the function!
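+#
+# Usage sketch (hypothetical breaks; assumes s2dverification is loaded for clim.colors):
+#   brks <- seq(-1, 1, length.out = 21)
+#   ColorBarCustom(brks, cols = clim.colors(20), subset = seq(1, 21, 5))  # label every 5th break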
+ +ColorBarCustom <- function (brks, cols = NULL, vert = TRUE, cex = 1, subset = 1:length(brks)) +{ + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- rainbow(nlev) + } + else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + if (vert) { + par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1, + 0), las = 1, cex = 1.2) + image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "") + box() + axis(4, at = seq(0.5, length(brks) - 0.5, 1)[subset], + tick = TRUE, labels = brks[seq(1, length(brks), 1)][subset], + cex.axis = cex) + } + else { + par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3, + 0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2) + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "") + box() + axis(1, at = seq(0.5, length(brks) - 0.5, 1)[subset], + labels = brks[seq(1, length(brks), 1)][subset], + cex.axis = cex) + } +} diff --git a/old/ColorBarV.R b/old/ColorBarV.R new file mode 100644 index 0000000000000000000000000000000000000000..d44c3383ac03ff09d24dc35c305c63259265b491 --- /dev/null +++ b/old/ColorBarV.R @@ -0,0 +1,64 @@ +ColorBarV <- function(brks, cols = NULL, vert = TRUE, subsampleg = 1, + cex = 1, marg=NULL) { + # Creates a horizontal or vertical colorbar to introduce in multipanels. + # + # Args: + # brks: Levels. + # cols: List of colours, optional. + # vert: TRUE/FALSE for vertical/horizontal colorbar. + # kharin: Supsampling factor of the interval between ticks on colorbar. + # Default: 1 = every level + # cex: Multiplicative factor to increase the ticks size, optional. + # marg: margins + # + # Returns: + # This function returns nothing + # + # History: + # 1.0 # 2012-04 (V. Guemas, vguemas@ic3.cat) # Original code + # 1.1 # 2014-11 (C. Prodhomme, chloe.prodhomme@ic3.cat) + # add cex option + # + # + # Input arguments + # ~~~~~~~~~~~~~~~~~ + # + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- rainbow(nlev) + } else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + + # + # Plotting colorbar + # ~~~~~~~~~~~~~~~~~~~ + # + if (vert) { + if (is.null(marg)== FALSE){ + par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2) + }else{ + par(mar = c(1, 1, 1, 1.5 *( 1 + cex)), mgp = c(1, 1, 0), las = 1, cex = 1.2) + } + image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, col = cols, + xlab = '', ylab = '') + box() + axis(4, at = seq(0.5, length(brks) - 0.5, subsampleg), tick = TRUE, + labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex) + } else { + if (marg){ + par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2) + }else{ + par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3,0.8*(cex-0.625))), 0), + las = 1, cex = 1.2) + } + + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols, + xlab = '', ylab = '') + box() + axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg), + labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex) + } +} diff --git a/old/IC3.conf b/old/IC3.conf new file mode 100755 index 0000000000000000000000000000000000000000..bef5c584cb76699e3a5746d5e243b90ec9f31357 --- /dev/null +++ b/old/IC3.conf @@ -0,0 +1,270 @@ +# s2dverification configuration file +# +# Check ?ConfigFileOpen after loading s2dverification for detailed +# documentation on this configuration file. 
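+#
+# Illustrative example of how a path is resolved from the defaults below
+# (the values are assumptions): with EXP_NAME=ecmwf, VAR_NAME=tas,
+# STORE_FREQ=monthly and START_DATE=19900101, DEFAULT_EXP_MAIN_PATH plus
+# DEFAULT_EXP_FILE_PATH expand to:
+#   /cfunas/exp/*/ecmwf/monthly_mean/tas_[36]hourly/tas_19900101.nc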
+ +####################### +!!definitions +####################### +DEFAULT_EXP_MAIN_PATH=/cfunas/exp/*/$EXP_NAME$/ +DEFAULT_EXP_FILE_PATH = $STORE_FREQ$_mean/$VAR_NAME$_[36]hourly/$EXP_FILE$ +DEFAULT_NC_VAR_NAME = $VAR_NAME$ +DEFAULT_SUFFIX = +DEFAULT_VAR_MIN = -1e19 +DEFAULT_VAR_MAX = 1e19 +DEFAULT_DIM_NAME_LONGITUDES = longitude +DEFAULT_DIM_NAME_LATITUDES = latitude +DEFAULT_DIM_NAME_MEMBERS = ensemble +DEFAULT_OBS_MAIN_PATH=/cfu/data/*/$OBS_NAME$/ +DEFAULT_OBS_FILE_PATH = $STORE_FREQ$_mean/$VAR_NAME$/$OBS_FILE$ +EXP_FILE = $VAR_NAME$_$START_DATE$.nc +LONG_EXP_FILE = $VAR_NAME$_$EXP_NAME$_$START_DATE$*.nc +ROOTA = /cfunas/exp/ENSEMBLES/decadal/MME +ROOTB = /cfunas/exp/ENSEMBLES +ROOTC = /cfunas/exp/CMIP5/decadal +ROOTD = /cfunas/exp/CMIP5/historical +ROOTE = /cfunas/exp/ECMWF/seasonal +ROOTF = /cfunas/exp/SPECS/decadal +OBS_FILE = $VAR_NAME$_$YEAR$$MONTH$.nc +ROOTG = /cfu/data +ROOTH = /cfu/diagnostics/postprocessed + + + + + +####################### +!!table of experiments +####################### +#exp_name, var_name[, exp_main_path[/, exp_file_path[, suffix[, var_min[, var_max]]]]]] +.*, .*, *, *, *, *, *, * +ECMWF_S4_sea, .*, $ROOTE$/0001/s004/m001/, *, *, *, *, * +ECMWF_S4_ann, .*, $ROOTE$/0001/s004/m003/, *, *, *, *, * +EnsEcmwfDec, .*, $ROOTA$/ecmwf/2005/s001/m001/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsCerfacsDec, .*, $ROOTA$/cerfacs/2002/s000/m001/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsIfmDec, .*, $ROOTA$/ifm/2002/s001/m010/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsUkmoDec, .*, $ROOTA$/ukmo/2026/s001/m001/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsEcmwfSeas, .*, $ROOTB$/seasonal/ecmwf/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsCerfacsSeas, .*, $ROOTB$/seasonal/cerfacs/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsIfmSeas, .*, $ROOTB$/seasonal/ifm/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsUkmoSeas, .*, $ROOTB$/seasonal/ukmo/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsCmccSeas, .*, $ROOTB$/seasonal/cmcc/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsMetfrSeas, .*, $ROOTB$/seasonal/metfr/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ecmwf, .*, $ROOTB$/ecmwf/2005/s001/m001/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cerfacs, .*, $ROOTA$/cerfacs/2002/s000/m001/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ifm, .*, $ROOTA$/ifm/2002/s001/m010/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ukmo, .*, $ROOTA$/ukmo/2026/s001/m001/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ECMWF_S3Seas, .*, $ROOTE$/0001/s003/m001/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +bccdec, .*, $ROOTC$/bcc/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +hadcm3dec, .*, $ROOTC$/hadcm3/i2p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +hadcm3dec2, .*, $ROOTC$/hadcm3/i3p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc4dec, .*, $ROOTC$/miroc4h/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc5dec, .*, $ROOTC$/miroc5/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mri-cgcm3dec, .*, $ROOTC$/mri-cgcm3/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cnrm-cm5dec, .*, $ROOTC$/cnrm-cm5/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cancm4dec1, .*, $ROOTC$/cancm4/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cancm4dec2, .*, $ROOTC$/cancm4/i1p2/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +knmidec, .*, $ROOTC$/ecearth/i1p1/, 
$STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +smhidec, .*, $ROOTC$/ecearth/i3p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mpimdec, .*, $ROOTC$/mpi-esm-lr/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mpimdec2, .*, $ROOTF$/mpi-esm-lr/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +gfdldec, .*, $ROOTC$/gfdl/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cmcc-cmdec, .*, $ROOTC$/cmcc-cm/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ipsldec, .*, $ROOTC$/ipsl/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ipsldec2, .*, $ROOTF$/ipsl/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +bcchis, .*, $ROOTD$/bcc/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +hadcm3his, .*, $ROOTD$/hadcm3/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc4his, .*, $ROOTD$/miroc4h/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc5his, .*, $ROOTD$/miroc5/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mri-cgcm3his, .*, $ROOTD$/mri-cgcm3/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cnrm-cm5his, .*, $ROOTD$/cnrm-cm5/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +knmihis, .*, $ROOTD$/ecearth/iXpY/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cancm4his, .*, $ROOTD$/cancm4/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mpihis, .*, $ROOTD$/mpi-esm-lr/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +gfdlhis, .*, $ROOTD$/gfdl/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ipslhis, .*, $ROOTD$/ipsl/i1p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cmcc-cmhis, .*, $ROOTD$/cmcc-cm/i0p1/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsEcmwfWeek, .*, /esnas/exp/ECMWF/monthly/ensfor/, weekly_mean/$VAR_NAME$$SUFFIX$/$VAR_NAME$_$START_DATE$00.nc, *, _f6h, *, * +EnsEcmwfWeekHind, .*, /esnas/exp/ECMWF/monthly/ensforhc/, weekly_mean/$VAR_NAME$$SUFFIX$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc, *, _f6h, *, * +ERAintEnsWeek, .*, /esnas/reconstructions/ecmwf/eraint/, weekly_mean/$VAR_NAME$$SUFFIX$/$VAR_NAME$_$START_DATE$.nc, *, _f6h, *, * +ERAint6h, .*, /esnas/reconstructions/ecmwf/eraint/, 6hourly/$VAR_NAME$$SUFFIX$/$VAR_NAME$_$START_DATE$.nc, *, *, *, * +.*, tas_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$_[36]hourly/$EXP_FILE$, *, *, *, * +.*, prlr_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$_[36]hourly/$EXP_FILE$, *, *, *, * +.*, tasmin_q10, *, $STORE_FREQ$_statistics/$VAR_NAME$_[36]hourly/$EXP_FILE$, *, *, *, * +.*, tasmax_q90, *, $STORE_FREQ$_statistics/$VAR_NAME$_[36]hourly/$EXP_FILE$, *, *, *, * +.*, siaN, *, $STORE_FREQ$_mean/ice/siasiesiv_N_$EXP_NAME$_$START_DATE$*.nc, sia, *, *, * +.*, sieN, *, $STORE_FREQ$_mean/ice/siasiesiv_N_$EXP_NAME$_$START_DATE$*.nc, sie, *, *, * +.*, sivN, *, $STORE_FREQ$_mean/ice/siasiesiv_N_$EXP_NAME$_$START_DATE$*.nc, siv, *, *, * +.*, siaS, *, $STORE_FREQ$_mean/ice/siasiesiv_S_$EXP_NAME$_$START_DATE$*.nc, sia, *, *, * +.*, sieS, *, $STORE_FREQ$_mean/ice/siasiesiv_S_$EXP_NAME$_$START_DATE$*.nc, sie, *, *, * +.*, sivS, *, $STORE_FREQ$_mean/ice/siasiesiv_S_$EXP_NAME$_$START_DATE$*.nc, siv, *, *, * +.*, sia_.*, *, $STORE_FREQ$_mean/ice/sia_Arcticreg1_$EXP_NAME$_$START_DATE$*.nc, *, *, *, * +.*, sie_.*, *, $STORE_FREQ$_mean/ice/sie_Arcticreg1_$EXP_NAME$_$START_DATE$*.nc, *, *, *, * +.*, siv_.*, *, $STORE_FREQ$_mean/ice/siv_Arcticreg1_$EXP_NAME$_$START_DATE$*.nc, *, *, *, * +.*, moc_40N55N_1-2km, *, $STORE_FREQ$_mean/moc/$LONG_EXP_FILE$, zomsfatl, *, *, * +.*, 
moc_30N40N_1-2km, *, $STORE_FREQ$_mean/moc/$LONG_EXP_FILE$, zomsfatl, *, *, * +.*, max_moc_38N50N_500m-2km, *, $STORE_FREQ$_mean/moc/$LONG_EXP_FILE$, maxmoc, *, *, * +.*, max_moc_40N, *, $STORE_FREQ$_mean/moc/$LONG_EXP_FILE$, maxmoc, *, *, * +.*, .*Pac_stc_.*, *, $STORE_FREQ$_mean/moc/$LONG_EXP_FILE$, zomsfpac, *, *, * +.*, .*Atl_stc_.*, *, $STORE_FREQ$_mean/moc/$LONG_EXP_FILE$, zomsfatl, *, *, * +.*, heatc, *, $STORE_FREQ$_mean/heatc/$LONG_EXP_FILE$, thc, *, *, * +.*, .*_heatc, *, $STORE_FREQ$_mean/heatc/$LONG_EXP_FILE$, thc, *, *, * +.*, ta50, *, *, *, *, *, * +EnsEcmwfDec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsCerfacsDec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsIfmDec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsUkmoDec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsEcmwfSeas, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsCerfacsSeas, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsIfmSeas, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsUkmoSeas, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsCmccSeas, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +EnsMetfrSeas, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ecmwf, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cerfacs, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ifm, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ukmo, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ECMWF_S3Seas, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +bccdec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +hadcm3dec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc4dec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc5dec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mri-cgcm3dec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cnrm-cm5dec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cancm4dec1, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cancm4dec2, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +knmidec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +smhidec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mpimdec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mpimdec2, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +gfdldec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cmcc-cmdec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ipsldec, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ipsldec2, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +bcchis, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +hadcm3his, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc4his, 
(tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +miroc5his, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mri-cgcm3his, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cnrm-cm5his, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +knmihis, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cancm4his, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +mpihis, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +gfdlhis, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ipslhis, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +cmc-cmhis, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$EXP_FILE$, *, *, *, * +ECMWF_S3Seas, tos, *, $STORE_FREQ$_mean/$VAR_NAME$_24hourly/$EXP_FILE$, *, *, *, * +ECMWF_S4_sea, tos, *, $STORE_FREQ$_mean/$VAR_NAME$_24hourly/$EXP_FILE$, *, *, *, * +ECMWF_S4_ann, tos, *, $STORE_FREQ$_mean/$VAR_NAME$_24hourly/$EXP_FILE$, *, *, *, * +ECMWF_S3Seas, tas, *, $STORE_FREQ$_mean/$VAR_NAME$_6hourly/$EXP_FILE$, *, *, *, * +ECMWF_S4_ann, prlr, *, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +hadcm3(dec|his), tos, *, *, *, *, *, * +mri-cgcm3(dec|his), tos, *, *, *, *, *, * +gfdl(dec|his), tos, *, *, *, *, *, * +hadcm3(dec|his), (tas|prlr|psl), *, *, *, *, *, * +mri-cgcm3(dec|his), (tas|prlr|psl), *, *, *, *, *, * +cancm4(dec(1|2)|his), (tas|prlr|psl), *, *, *, *, *, * +gfdl(dec|his), (tas|prlr|psl), *, *, *, *, *, * +cmcc-cmdec, (tas|prlr|psl), *, *, *, *, *, * +DePreSysAsimDec, .*, $ROOTB$/decadal/DePreSys/2502/s051/m01$MEMBER_NUMBER$/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +DePreSysNoAsimDec, .*, $ROOTB$/decadal/DePreSys/2501/s051/m01$MEMBER_NUMBER$/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +DePreSysAsimSeas, .*, $ROOTB$/seasonal/DePreSys/2502/s051/m01$MEMBER_NUMBER$/, $STORE_FREQ$_mean/$VAR_NAME$/$EXP_FILE$, *, *, *, * +DePreSys.*, (u|v)as, *, $STORE_FREQ$_mean/$VAR_NAME$_rg/$EXP_FILE$, *, *, *, * +DePreSys.*Dec, hf(s|l)sd, *, *, *, *, *, * + +####################### +!!table of observations +####################### +#obs_name, var_name[, obs_main_path[/, obs_file_path[, suffix[, var_min[, var_max]]]]]] +.*, .*, *, *, *, *, * +JRA, .*, $ROOTG$/jma/jra-55/, *, *, *, * +GHCN, .*, $ROOTG$/noaa/ghcn_v2/, *, *, *, * +GHCNERSSTGISS, .*, $ROOTG$/noaanasa/ghcnersstgiss/, *, *, *, * +ERSST, .*, $ROOTG$/noaa/ersstv3b/, *, *, *, * +UKMO, .*, $ROOTG$/ukmo/hadisstv1.1/, *, *, *, * +HadISST, .*, $ROOTG$/ukmo/hadisstv1.1/, *, *, *, * +HadSLP, .*, $ROOTG$/ukmo/hadslp2/, *, *, *, * +GPCC, .*, $ROOTG$/dwd/gpcc_combined1x1_v4/, *, *, *, * +GPCP, .*, $ROOTG$/noaa/gpcp_v2.2/, *, *, *, * +DS94, .*, $ROOTG$/iri/DaSilva/, *, *, *, * +OAFlux, .*, $ROOTG$/whoi/oaflux/, *, *, *, * +DFS4.3, .*, $ROOTG$/legi/dfs4.3/, *, *, *, * +NCDCoc, .*, $ROOTG$/noaa/merged_ocean_mean/, *, *, *, * +NCDCland, .*, $ROOTG$/noaa/merged_land_mean/, *, *, *, * +NCDCglo, .*, $ROOTG$/noaa/merged_global_mean/, *, *, *, * +NCEP, .*, $ROOTG$/ncep/, $STORE_FREQ$_mean/$VAR_NAME$_6hourly/$VAR_NAME$_ncep_$YEAR$$MONTH$.nc, *, *, * +ERA40, .*, $ROOTG$/ecmwf/era40/, $STORE_FREQ$_mean/$VAR_NAME$_f6h/$OBS_FILE$, *, *, * +ERAint, .*, $ROOTG$/ecmwf/eraint/, $STORE_FREQ$_mean/$VAR_NAME$_f6h/$OBS_FILE$, *, *, * +20thCv2, .*, $ROOTG$/noaa/20thc_reanv2/, $STORE_FREQ$_mean/$VAR_NAME$_f6h/$OBS_FILE$, *, *, * +CRU, .*, $ROOTG$/cru/, 
$STORE_FREQ$_mean/$VAR_NAME$/$VAR_NAME$_cru_$YEAR$$MONTH$.nc, *, *, * +EOBS, .*, $ROOTG$/eobs/eobs_v10/, $STORE_FREQ$_mean/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc, *, *, * +GISSglo, .*, $ROOTG$/nasa/global_mean/, $VAR_NAME$_GISS_$YEAR$$MONTH$.nc, *, *, * +GISSland, .*, $ROOTG$/nasa/land_mean/, $VAR_NAME$_GISS_$YEAR$$MONTH$.nc, *, *, * +GISSoc, .*, $ROOTG$/nasa/ocean_mean/, $VAR_NAME$_GISS_$YEAR$$MONTH$.nc, *, *, * +CRUTEM3land, .*, $ROOTG$/ukmo/land_mean/, $VAR_NAME$_CRUTEM3_$YEAR$$MONTH$.nc, *, *, * +HadSST2oc, .*, $ROOTG$/ukmo/ocean_mean/, $VAR_NAME$_HadSST2_$YEAR$$MONTH$.nc, *, *, * +HadCRUT3glo, .*, $ROOTG$/ukmo/global_mean/, $VAR_NAME$_HadCRUT3_$YEAR$$MONTH$.nc, *, *, * +HadCRUT4, .*, $ROOTG$/ukmo/hadcrut4/, $STORE_FREQ$_mean/$VAR_NAME$_anom/$VAR_NAME$_$YEAR$$MONTH$.nc, *, *, * +ESA, .*, $ROOTG$/esa/, $STORE_FREQ$_mean/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc, *, *, * +NSIDC, .*, $ROOTG$/nsidc/, $STORE_FREQ$_mean/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc, *, *, * +.*, tas_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +.*, prlr_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +.*, tasmin_q10, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +.*, tasmax_q90, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +.*, (tos|tas|prlr), *, *, *, -999, 599 +GHCN, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +GHCNERSSTGISS, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +ERSST, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +UKMO, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +HadISST, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +HadSLP, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +GPCC, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +GPCP, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +DS94, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +OAFlux, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +DFS4.3, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +NCDCoc, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +NCDCland, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +NCDCglo, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +NCEP, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$_6hourly/$VAR_NAME$_ncep_$YEAR$$MONTH$.nc, *, *, * +ERA40, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$_f6h/$OBS_FILE$, *, *, * +ERAint, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$_f6h/$OBS_FILE$, *, *, * +20thCv2, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$_f6h/$OBS_FILE$, *, *, * +CRU, (tas|prlr).*_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$VAR_NAME$_cru_$YEAR$$MONTH$.nc, *, *, * +EOBS, (tas|prlr).*_.*, *, $VAR_NAME$_eobs_0.5x0.5/$VAR_NAME$_eobs_$YEAR$$MONTH$.nc, *, *, * +GISSglo, (tas|prlr).*_.*, *, $VAR_NAME$_GISS_$YEAR$$MONTH$.nc, *, *, * +GISSland, (tas|prlr).*_.*, *, $VAR_NAME$_GISS_$YEAR$$MONTH$.nc, *, *, * +GISSoc, (tas|prlr).*_.*, *, $VAR_NAME$_GISS_$YEAR$$MONTH$.nc, *, *, * +CRUTEM3land, (tas|prlr).*_.*, *, $VAR_NAME$_CRUTEM3_$YEAR$$MONTH$.nc, *, *, * +HadSST2oc, (tas|prlr).*_.*, *, $VAR_NAME$_HadSST2_$YEAR$$MONTH$.nc, *, *, * +HadCRUT3glo, (tas|prlr).*_.*, *, $VAR_NAME$_HadCRUT3_$YEAR$$MONTH$.nc, *, *, * +HadCRUT4, (tas|prlr).*_.*, *, 
$STORE_FREQ$_statistics/$VAR_NAME$_anom/$VAR_NAME$_$YEAR$$MONTH$.nc, *, *, * +NCEP, (hf(l|s)sd|r(l|s)s|rsds), *, $STORE_FREQ$_mean/$VAR_NAME$_step6h/$VAR_NAME$_ncep_$YEAR$$MONTH$.nc, *, *, * +ERAint, (hf(l|s)sd|r(l|s)s|rsds|prlr), *, $STORE_FREQ$_mean/$VAR_NAME$_s0-12h/$OBS_FILE$, *, *, * +20thCv2, (hf(l|s)sd|r(l|s)s|rsds|prlr), *, $STORE_FREQ$_mean/$VAR_NAME$_s0-12h/$OBS_FILE$, *, *, * +ERAint, (tasm(in|ax)|prlr_.*q(1|9)0)_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +ERAintDailyLowRes, .*, /scratch/Earth/ncortesi/RESILIENCE/ERA-interim_daily_mean_psl_low_res/, $OBS_FILE$, *, *, *, * +ERAintDailyHighRes, .*, /esnas/reconstructions/ecmwf/eraint/daily_mean/psl_f6h/, $OBS_FILE$, *, *, *, * +ERAintDailyHighResWind, .*, /esnas/reconstructions/ecmwf/eraint/daily_mean/sfcWind_f6h/, $OBS_FILE$, *, *, *, * + +20thCv2, (tasm(in|ax)|prlr_.*q(1|9)0)_.*, *, $STORE_FREQ$_statistics/$VAR_NAME$/$OBS_FILE$, *, *, * +.*, moc_.*, /cfunas/exp/ECMWF/NEMOVAR_S4/, $STORE_FREQ$_mean/moc/$VAR_NAME$_nemovar_s4_19570901_fc0-4_195709_201212.nc, zomsfatl, *, *, * +.*, max_moc_.*, /cfunas/exp/ECMWF/NEMOVAR_S4/, $STORE_FREQ$_mean/moc/$VAR_NAME$_nemovar_s4_19570901_fc0-4_195709_201212.nc, maxmoc, *, *, * +.*, .*Pac_stc_.*, /cfunas/exp/ECMWF/NEMOVAR_S4/, $STORE_FREQ$_mean/moc/$VAR_NAME$_nemovar_s4_19570901_fc0-4_195709_201212.nc, zomsfpac, *, *, * +.*, .*Atl_stc_.*, /cfunas/exp/ECMWF/NEMOVAR_S4/, $STORE_FREQ$_mean/moc/$VAR_NAME$_nemovar_s4_19570901_fc0-4_195709_201212.nc, zomsfatl, *, *, * +.*, heatc, /cfunas/exp/ECMWF/NEMOVAR_S4/, $STORE_FREQ$_mean/heatc/$VAR_NAME$_nemovar_s4_19570901_fc0-4_195709_201212.nc, thc, *, *, * +.*, .*_heatc, /cfunas/exp/ECMWF/NEMOVAR_S4/, $STORE_FREQ$_mean/heatc/$VAR_NAME$_nemovar_s4_19570901_fc0-4_195709_201212.nc, thc, *, *, * +PIOMAS, sivN, $ROOTG$/psc/piomas/original_files/, $VAR_NAME$_piomas.nc, sivN, *, *, * +PIOMAS, sivS, $ROOTG$/psc/piomas/original_files/, $VAR_NAME$_piomas.nc, sivS, *, *, * +UCL, sivN, /cfunas/exp/UCL/, monthly_mean/ice/siasiesiv_N_ucl_fc0-0_19790101_197901_200712.nc, siv, *, *, * +UCL, sivS, /cfunas/exp/UCL/, monthly_mean/ice/siasiesiv_S_ucl_fc0-0_19790101_197901_200712.nc, siv, *, *, * +HadISST, sieN, $ROOTH$/ice/ukmo/, ice_hadisst_N.nc, sie, *, *, * +HadISST, siaN, $ROOTH$/ice/ukmo/, ice_hadisst_N.nc, sia, *, *, * +HadISST, sieS, $ROOTH$/ice/ukmo/, ice_hadisst_S.nc, sie, *, *, * +HadISST, siaS, $ROOTH$/ice/ukmo/, ice_hadisst_S.nc, sia, *, *, * +NSIDC, sieN, $ROOTH$/ice/nsidc/, ice_nsidc_N.nc, sie, *, *, * +NSIDC, siaN, $ROOTH$/ice/nsidc/, ice_nsidc_N.nc, sia, *, *, * +NSIDC, sieS, $ROOTH$/ice/nsidc/, ice_nsidc_S.nc, sie, *, *, * +NSIDC, siaS, $ROOTH$/ice/nsidc/, ice_nsidc_S.nc, sia, *, *, * +HadISST, si(a|e|v)_.*, $ROOTG$/ukmo/hadisst/postprocessed/, siasie_HadISST.nc, *, *, *, * +NSIDC, si(a|e|v)_.*, $ROOTG$/nsidc/postprocessed/, siasie_NSIDC.nc, *, *, *, * diff --git a/old/PlotEquiMap_colored.R b/old/PlotEquiMap_colored.R new file mode 100644 index 0000000000000000000000000000000000000000..8883be1bd2f378dc37ab855b3054e3464804834e --- /dev/null +++ b/old/PlotEquiMap_colored.R @@ -0,0 +1,361 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: 'col.borders' set the color of the continent's borders. By default it is black +# 'col.lakes' set the color of the lakes. By default it is black. This option is available only if filled.continents=TRUE. +# The default color of the continent themselves is gray and cannot be changed by these options. 
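+# Example call (hypothetical 'var', 'lon' and 'lat' objects; the interface is otherwise identical to PlotEquiMap):
+#   PlotEquiMap_colored(var, lon, lat, filled.continents = TRUE,
+#                       col.borders = "gray40", col.lakes = "lightblue")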
+# I/O: same as PlotEquiMap() +# Assumptions: same as PlotEquiMap() +# Branch: general + +PlotEquiMap_colored <- function (var, lon, lat, varu = NULL, varv = NULL, toptitle = "", sizetit = 1, units = "", brks = NULL, cols = NULL, square = TRUE, + filled.continents = TRUE, col.borders = "black", col.lakes = "white", + contours = NULL, brks2 = NULL, dots = NULL, + arr_subsamp = 1, arr_scale = 1, arr_ref_len = 15, + arr_units = "m/s", arr_scale_shaft = 1, arr_scale_shaft_angle = 1, + axelab = TRUE, labW = FALSE, intylat = 20, intxlon = 20, + drawleg = TRUE, boxlim = NULL, boxcol = "purple2", boxlwd = 10, + subsampleg = 1, numbfig = 1, colNA = "white", fileout = NULL, + ...) +{ + excludedArgs <- c("cex", "cex.axis", "cex.lab", "cex.main", + "col", "din", "fig", "fin", "lab", "las", "lty", "lwd", + "mai", "mar", "mgp", "new", "oma", "ps", "tck") + userArgs <- .FilterUserGraphicArgs(excludedArgs, ...) + if (!is.null(fileout)) { + deviceInfo <- .SelectDevice(fileout) + saveToFile <- deviceInfo$fun + fileout <- deviceInfo$files + } + data(coastmap, package = "GEOmap", envir = environment()) + dims <- dim(var) + if (is.null(varu) == FALSE) { + if (is.null(varv)) { + stop("you set a u component of wind/current but not the correponding v component") + } + else if ((isTRUE(all.equal(dim(var), dim(varv))) & isTRUE((all.equal(dim(var), + dim(varv))))) == FALSE) { + stop("var, varu and varv should have the same Dimension") + } + else if (drawleg == TRUE) { + stop("plotting the wind is not compatible with draleg = TRUE") + } + else if (square == FALSE) { + stop("the wind can only be plotted if square = TRUE") + } + } + if (length(dims) > 2) { + stop("Only 2 dimensions expected for var : (lon,lat) ") + } + if (dims[1] != length(lon) | dims[2] != length(lat)) { + if (dims[1] == length(lat) & dims[2] == length(lon)) { + var <- t(var) + if (is.null(varu) == FALSE) { + varu <- t(varu) + } + if (is.null(varv) == FALSE) { + varv <- t(varv) + } + dims <- dim(var) + } + else { + stop("Inconsistent var dimensions / longitudes + latitudes") + } + } + latb <- sort(lat, index.return = TRUE) + dlon <- lon[2:dims[1]] - lon[1:(dims[1] - 1)] + wher <- which(dlon > (mean(dlon) + 1)) + if (length(wher) > 0) { + lon[(wher + 1):dims[1]] <- lon[(wher + 1):dims[1]] - + 360 + } + lonb <- sort(lon, index.return = TRUE) + latmin <- floor(min(lat)/10) * 10 + latmax <- ceiling(max(lat)/10) * 10 + lonmin <- floor(min(lon)/10) * 10 + lonmax <- ceiling(max(lon)/10) * 10 + varlim <- signif(c(min(var, na.rm = TRUE), max(var, na.rm = TRUE)), + 4) + if (!is.null(cols)) { + if (!is.character(cols)) { + stop("Error: 'cols' must be a character vector.") + } + } + if (is.null(brks)) { + if (!is.null(cols)) { + brks <- length(cols) + 1 + } + else { + brks <- 21 + } + } + if (is.numeric(brks)) { + if (length(brks) == 1) { + brks <- seq(varlim[1], varlim[2], length.out = brks) + } + if (is.null(cols)) { + cols <- clim.colors(length(brks) - 1) + } + else if (length(cols) != (length(brks) - 1)) { + stop("Error: inconsistent number of 'brks' and 'cols'.") + } + } + else { + stop("Error: 'brks' must be a numeric vector.") + } + if (is.null(brks2) == TRUE) { + if (is.null(contours)) { + if (square == FALSE) { + brks2 <- brks + contours <- var + } + } + else { + ll <- signif(min(contours, na.rm = TRUE), 2) + ul <- signif(max(contours, na.rm = TRUE), 2) + brks2 <- signif(seq(ll, ul, length.out = length(brks)), + 2) + } + } + if (!is.null(fileout)) + saveToFile(fileout, width = 11, height = 8) + par(userArgs) + if (axelab == TRUE) { + ypos <- seq(latmin, 
latmax, intylat) + xpos <- seq(lonmin, lonmax, intxlon) + letters <- array("", length(ypos)) + letters[ypos < 0] <- "S" + letters[ypos > 0] <- "N" + ylabs <- paste(as.character(abs(ypos)), letters, sep = "") + letters <- array("", length(xpos)) + if (labW) { + nlon <- length(xpos) + xpos2 <- xpos + xpos2[xpos2 > 180] <- 360 - xpos2[xpos2 > 180] + } + letters[xpos < 0] <- "W" + letters[xpos > 0] <- "E" + if (labW) { + letters[xpos == 0] <- " " + letters[xpos == 180] <- " " + letters[xpos > 180] <- "W" + xlabs <- paste(as.character(abs(xpos2)), letters, + sep = "") + } + else { + xlabs <- paste(as.character(abs(xpos)), letters, + sep = "") + } + xmargin <- 1.2 - (numbfig^0.2) * 0.05 + ymargin <- 3 - (numbfig^0.2) + spaceticklab <- 1.3 - (numbfig^0.2) * 0.8 + topmargin <- 0.4 + ymargin2 <- 1.5 - (numbfig^0.2) * 0.9 + } + else { + xmargin <- 0.2 + ymargin <- 0.2 + switch(as.character(square), `FALSE` = 1.8, + 0) + topmargin <- 0.2 + spaceticklab <- 1 + ymargin2 <- 0.2 + } + if (!is.null(toptitle)) { + if (!is.na(toptitle) && is.character(toptitle)) { + if (toptitle != "") { + topmargin <- 2.5 - (numbfig^0.2) * 0.6 + } + } + } + if (min(lon) < 0) { + continents <- "world" + } + else { + continents <- "world2" + } + if (is.null(varu) == FALSE) { + if (axelab == FALSE) { + xmargin = xmargin + 2.5/numbfig + } + else { + xmargin = xmargin + xmargin/numbfig + } + } + if (square) { + if (drawleg) { + layout(matrix(1:2, ncol = 1, nrow = 2), heights = c(5, + 1)) + } + par(mar = c(xmargin, ymargin, topmargin, ymargin2), cex = 1.4, + mgp = c(3, spaceticklab, 0), las = 0) + if (colNA != "white") { + blanks <- array(0, dim = c(length(lonb$x), length(latb$x))) + image(lonb$x, latb$x, blanks, col = c(colNA), breaks = c(-1, + 1), main = toptitle, cex.main = (1.5/numbfig^(0.2)) * + sizetit, axes = FALSE, xlab = "", ylab = "") + flagadd <- T + } + else { + flagadd <- F + } + image(lonb$x, latb$x, var[lonb$ix, latb$ix], col = cols, + breaks = brks, main = toptitle, axes = FALSE, xlab = "", + ylab = "", cex.main = (1.5/numbfig^(0.2)) * sizetit, + add = flagadd) + if (axelab == TRUE) { + axis(2, at = ypos, labels = ylabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + axis(1, at = xpos, labels = xlabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + } + if (is.null(contours) == FALSE) { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5)) + } + map(continents, interior = FALSE, add = TRUE, lwd = 1, + resolution = 0, col = col.borders) + box() + } + else { + par(mar = c(xmargin + 5, ymargin + 1.5, topmargin, ymargin2), + cex.main = (1.6 * numbfig^(0.3)) * sizetit, cex.axis = 1.4, + cex.lab = 1.6, mgp = c(3, spaceticklab + 0.5, 0), + las = 0) + if (axelab == TRUE) { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + axis(2, at = ypos, labels = ylabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + axis(1, at = xpos, labels = xlabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, + resolution = 0, col = col.borders) + }, key.title = title(main = units, cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + else { + filled.contour(lonb$x, 
latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, + resolution = 0, col = col.borders) + }, key.title = title(main = units, cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + } + if (is.null(dots) == FALSE) { + for (ix in 1:length(lon)) { + for (jy in 1:length(lat)) { + if (is.na(var[ix, jy]) == FALSE) { + if (dots[ix, jy] == TRUE) { + text(x = lon[ix], y = lat[jy], ".", cex = 12/(sqrt(sqrt(length(var))) * + numbfig^0.5)) + } + } + } + } + } + if (square == TRUE & filled.continents == TRUE) { + if (min(lon) >= 0) { + ylat <- latmin:latmax + xlon <- lonmin:lonmax + proj <- GEOmap::setPROJ(1, LON0 = mean(xlon), LAT0 = mean(ylat), + LATS = ylat, LONS = xlon) + coastmap$STROKES$col[which(coastmap$STROKES$col == + "blue")] <- col.lakes + par(new = TRUE) + GEOmap::plotGEOmap(coastmap, PROJ = proj, border = col.borders, + add = TRUE) + box() + } + else { + map(continents, interior = FALSE, wrap = TRUE, lwd = 0.7, + col = gray(0.5), fill = TRUE, add = TRUE, border = col.borders, + resolution = 0) + } + } + if (is.null(boxlim) == FALSE) { + boxlimaux <- boxlim + if (boxlim[1] > boxlim[3]) { + boxlimaux[1] <- boxlim[1] - 360 + } + if (length(boxlimaux) != 4) { + stop("Region to be highlighted is ill defined") + } + else if (boxlimaux[2] < latmin | boxlimaux[4] > latmax | + boxlimaux[1] < lonmin | boxlimaux[3] > lonmax) { + stop("Invalid boundaries") + } + else if (boxlimaux[1] < 0 && boxlimaux[3] > 0) { + segments(boxlimaux[1], boxlimaux[2], 0, boxlimaux[2], + col = boxcol, lwd = boxlwd) + segments(0, boxlimaux[2], boxlimaux[3], boxlimaux[2], + col = boxcol, lwd = boxlwd) + segments(boxlimaux[1], boxlimaux[4], 0, boxlimaux[4], + col = boxcol, lwd = boxlwd) + segments(0, boxlimaux[4], boxlimaux[3], boxlimaux[4], + col = boxcol, lwd = boxlwd) + segments(boxlimaux[1], boxlimaux[2], boxlimaux[1], + boxlimaux[4], col = boxcol, lwd = boxlwd) + segments(boxlimaux[3], boxlimaux[2], boxlimaux[3], + boxlimaux[4], col = boxcol, lwd = boxlwd) + } + else { + rect(boxlimaux[1], boxlimaux[2], boxlimaux[3], boxlimaux[4], + border = boxcol, col = NULL, lwd = boxlwd, lty = "solid") + } + } + if (!is.null(varu)) { + lontab <- InsertDim(lonb$x, 2, length(latb$x)) + lattab <- InsertDim(latb$x, 1, length(lonb$x)) + varplotu <- varu[lonb$ix, latb$ix] + varplotv <- varv[lonb$ix, latb$ix] + sublon <- seq(1, length(lon), arr_subsamp) + sublat <- seq(1, length(lat), arr_subsamp) + uaux <- lontab[sublon, sublat] + varplotu[sublon, sublat] * + 0.5 * arr_scale + vaux <- lattab[sublon, sublat] + varplotv[sublon, sublat] * + 0.5 * arr_scale + lenshaft <- 0.18 * arr_scale * arr_scale_shaft/(numbfig^(0.2)) + angleshaft <- 12 * arr_scale_shaft_angle/(numbfig^(0.2)) + arrows(lontab[sublon, sublat], lattab[sublon, sublat], + uaux, vaux, angle = angleshaft, length = lenshaft) + posarlon = lonb$x[1] + (lonmax - lonmin) * 0.1 + posarlat = latmin - (latmax - latmin) * 0.035 + arrows(posarlon, posarlat, posarlon + 0.5 * arr_scale * + arr_ref_len, posarlat, length = lenshaft, angle = angleshaft, + xpd = TRUE) + xpdsave = par()$xpd + par(xpd = NA) + text(x = posarlon + arr_scale, y = posarlat - (latmax - + latmin) * 0.03, labels = paste(as.character(arr_ref_len), + 
arr_units, sep = ""), cex = (1 * sizetit/numbfig^(0.2))) + par(xpd = xpdsave) + } + if (square & drawleg) { + par(mar = c(1.5, ymargin + 1.5, 2.5, ymargin2), mgp = c(1.5, + 0.3, 0), las = 1, cex = 1.2) + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "", main = units, cex.main = 1.1) + box() + axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg), + labels = brks[seq(1, length(brks), subsampleg)]) + } + if (!is.null(fileout)) + dev.off() + invisible(list(cols = cols, brks = brks)) +} diff --git a/old/PlotEquiMap_colored_Raul.R b/old/PlotEquiMap_colored_Raul.R new file mode 100644 index 0000000000000000000000000000000000000000..e154a72732c3e64d80abebdca9ef8c91cd8bb996 --- /dev/null +++ b/old/PlotEquiMap_colored_Raul.R @@ -0,0 +1,363 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: 'col.borders' set the color of the continent's borders. By default it is black +# 'col.lakes' set the color of the lakes. By default it is black. This option is available only if filled.continents=TRUE. +# The default color of the continent themselves is gray and cannot be changed by these options. +# I/O: same as PlotEquiMap() +# Assumptions: same as PlotEquiMap() +# Branch: general + +PlotEquiMap_colored <- function (var, lon, lat, varu = NULL, varv = NULL, toptitle = "", sizetit = 1, units = "", brks = NULL, cols = NULL, square = TRUE, + filled.continents = TRUE, col.borders = "black", col.lakes = "white", + contours = NULL, brks2 = NULL, dots = NULL, + arr_subsamp = 1, arr_scale = 1, arr_ref_len = 15, + arr_units = "m/s", arr_scale_shaft = 1, arr_scale_shaft_angle = 1, + axelab = TRUE, labW = FALSE, intylat = 20, intxlon = 20, + drawleg = TRUE, boxlim = NULL, boxcol = "purple2", boxlwd = 10, + subsampleg = 1, numbfig = 1, colNA = "white", fileout = NULL, + ...) +{ + excludedArgs <- c("cex", "cex.axis", "cex.lab", "cex.main", + "col", "din", "fig", "fin", "lab", "las", "lty", "lwd", + "mai", "mar", "mgp", "new", "oma", "ps", "tck") + userArgs <- .FilterUserGraphicArgs(excludedArgs, ...) 
+ if (!is.null(fileout)) { + deviceInfo <- .SelectDevice(fileout) + saveToFile <- deviceInfo$fun + fileout <- deviceInfo$files + } + data(coastmap, package = "GEOmap", envir = environment()) + dims <- dim(var) + if (is.null(varu) == FALSE) { + if (is.null(varv)) { + stop("you set a u component of wind/current but not the correponding v component") + } + else if ((isTRUE(all.equal(dim(var), dim(varv))) & isTRUE((all.equal(dim(var), + dim(varv))))) == FALSE) { + stop("var, varu and varv should have the same Dimension") + } + else if (drawleg == TRUE) { + stop("plotting the wind is not compatible with draleg = TRUE") + } + else if (square == FALSE) { + stop("the wind can only be plotted if square = TRUE") + } + } + if (length(dims) > 2) { + stop("Only 2 dimensions expected for var : (lon,lat) ") + } + if (dims[1] != length(lon) | dims[2] != length(lat)) { + if (dims[1] == length(lat) & dims[2] == length(lon)) { + var <- t(var) + if (is.null(varu) == FALSE) { + varu <- t(varu) + } + if (is.null(varv) == FALSE) { + varv <- t(varv) + } + dims <- dim(var) + } + else { + stop("Inconsistent var dimensions / longitudes + latitudes") + } + } + latb <- sort(lat, index.return = TRUE) + dlon <- lon[2:dims[1]] - lon[1:(dims[1] - 1)] + wher <- which(dlon > (mean(dlon) + 1)) + if (length(wher) > 0) { + lon[(wher + 1):dims[1]] <- lon[(wher + 1):dims[1]] - + 360 + } + lonb <- sort(lon, index.return = TRUE) + latmin <- floor(min(lat)/10) * 10 + latmax <- ceiling(max(lat)/10) * 10 + lonmin <- floor(min(lon)/10) * 10 + lonmax <- ceiling(max(lon)/10) * 10 + varlim <- signif(c(min(var, na.rm = TRUE), max(var, na.rm = TRUE)), + 4) + if (!is.null(cols)) { + if (!is.character(cols)) { + stop("Error: 'cols' must be a character vector.") + } + } + if (is.null(brks)) { + if (!is.null(cols)) { + brks <- length(cols) + 1 + } + else { + brks <- 21 + } + } + if (is.numeric(brks)) { + if (length(brks) == 1) { + brks <- seq(varlim[1], varlim[2], length.out = brks) + } + if (is.null(cols)) { + cols <- clim.colors(length(brks) - 1) + } + else if (length(cols) != (length(brks) - 1)) { + stop("Error: inconsistent number of 'brks' and 'cols'.") + } + } + else { + stop("Error: 'brks' must be a numeric vector.") + } + if (is.null(brks2) == TRUE) { + if (is.null(contours)) { + if (square == FALSE) { + brks2 <- brks + contours <- var + } + } + else { + ll <- signif(min(contours, na.rm = TRUE), 2) + ul <- signif(max(contours, na.rm = TRUE), 2) + brks2 <- signif(seq(ll, ul, length.out = length(brks)), + 2) + } + } + if (!is.null(fileout)) + saveToFile(fileout, width = 11, height = 8) + par(userArgs) + if (axelab == TRUE) { + ypos <- seq(latmin, latmax, intylat) + xpos <- seq(lonmin, lonmax, intxlon) + letters <- array("", length(ypos)) + letters[ypos < 0] <- "S" + letters[ypos > 0] <- "N" + ylabs <- paste(as.character(abs(ypos)), letters, sep = "") + letters <- array("", length(xpos)) + if (labW) { + nlon <- length(xpos) + xpos2 <- xpos + xpos2[xpos2 > 180] <- 360 - xpos2[xpos2 > 180] + } + letters[xpos < 0] <- "W" + letters[xpos > 0] <- "E" + if (labW) { + letters[xpos == 0] <- " " + letters[xpos == 180] <- " " + letters[xpos > 180] <- "W" + xlabs <- paste(as.character(abs(xpos2)), letters, + sep = "") + } + else { + xlabs <- paste(as.character(abs(xpos)), letters, + sep = "") + } + xmargin <- 1.2 - (numbfig^0.2) * 0.05 + ymargin <- 3 - (numbfig^0.2) + spaceticklab <- 1.3 - (numbfig^0.2) * 0.8 + topmargin <- 0.4 + ymargin2 <- 1.5 - (numbfig^0.2) * 0.9 + } + else { + xmargin <- 0.2 + ymargin <- 0.2 + switch(as.character(square), 
`FALSE` = 1.8, + 0) + topmargin <- 0.2 + spaceticklab <- 1 + ymargin2 <- 0.2 + } + if (!is.null(toptitle)) { + if (!is.na(toptitle) && is.character(toptitle)) { + if (toptitle != "") { + topmargin <- 2.5 - (numbfig^0.2) * 0.6 + } + } + } + if (min(lon) < 0) { + continents <- "world" + } + else { + continents <- "world2" + } + if (is.null(varu) == FALSE) { + if (axelab == FALSE) { + xmargin = xmargin + 2.5/numbfig + } + else { + xmargin = xmargin + xmargin/numbfig + } + } + if (square == TRUE & filled.continents == TRUE) { + if (min(lon) >= 0) { + ylat <- latmin:latmax + xlon <- lonmin:lonmax + proj <- GEOmap::setPROJ(1, LON0 = mean(xlon), LAT0 = mean(ylat), + LATS = ylat, LONS = xlon) + coastmap$STROKES$col[which(coastmap$STROKES$col == + "blue")] <- col.lakes + par(new = TRUE) + GEOmap::plotGEOmap(coastmap, PROJ = proj, border = col.borders, + add = TRUE) + box() + } + else { + map(continents, interior = FALSE, wrap = TRUE, lwd = 0.7, + col = gray(0.5), fill = TRUE, add = TRUE, border = col.borders, + resolution = 0) + } + } + + if (square) { + if (drawleg) { + layout(matrix(1:2, ncol = 1, nrow = 2), heights = c(5, + 1)) + } + par(mar = c(xmargin, ymargin, topmargin, ymargin2), cex = 1.4, + mgp = c(3, spaceticklab, 0), las = 0) + if (colNA != "white") { + blanks <- array(0, dim = c(length(lonb$x), length(latb$x))) + image(lonb$x, latb$x, blanks, col = c(colNA), breaks = c(-1, + 1), main = toptitle, cex.main = (1.5/numbfig^(0.2)) * + sizetit, axes = FALSE, xlab = "", ylab = "") + flagadd <- T + } + else { + flagadd <- F + } + image(lonb$x, latb$x, var[lonb$ix, latb$ix], col = cols, + breaks = brks, main = toptitle, axes = FALSE, xlab = "", + ylab = "", cex.main = (1.5/numbfig^(0.2)) * sizetit, + add = flagadd) + if (axelab == TRUE) { + axis(2, at = ypos, labels = ylabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + axis(1, at = xpos, labels = xlabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + } + if (is.null(contours) == FALSE) { + map(continents, interior = FALSE, add = TRUE, lwd = 1, + resolution = 0, col = col.borders) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "flattest", add = TRUE, drawlabels = FALSE, + labcex = 0.8/numbfig, lwd = 0.5/(numbfig^0.5)) + } + box() + } + else { + par(mar = c(xmargin + 5, ymargin + 1.5, topmargin, ymargin2), + cex.main = (1.6 * numbfig^(0.3)) * sizetit, cex.axis = 1.4, + cex.lab = 1.6, mgp = c(3, spaceticklab + 0.5, 0), + las = 0) + if (axelab == TRUE) { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + axis(2, at = ypos, labels = ylabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + axis(1, at = xpos, labels = xlabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2) + map(continents, interior = TRUE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, + resolution = 0, col = col.borders) + }, key.title = title(main = units, cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + else { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, 
+ labcex = 1, lwd = 2) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, + resolution = 0, col = col.borders) + }, key.title = title(main = units, cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + } + + if (is.null(dots) == FALSE) { + for (ix in 1:length(lon)) { + for (jy in 1:length(lat)) { + if (is.na(var[ix, jy]) == FALSE) { + if (dots[ix, jy] == TRUE) { + text(x = lon[ix], y = lat[jy], ".", cex = 12/(sqrt(sqrt(length(var))) * + numbfig^0.5)) + } + } + } + } + } + if (is.null(boxlim) == FALSE) { + boxlimaux <- boxlim + if (boxlim[1] > boxlim[3]) { + boxlimaux[1] <- boxlim[1] - 360 + } + if (length(boxlimaux) != 4) { + stop("Region to be highlighted is ill defined") + } + else if (boxlimaux[2] < latmin | boxlimaux[4] > latmax | + boxlimaux[1] < lonmin | boxlimaux[3] > lonmax) { + stop("Invalid boundaries") + } + else if (boxlimaux[1] < 0 && boxlimaux[3] > 0) { + segments(boxlimaux[1], boxlimaux[2], 0, boxlimaux[2], + col = boxcol, lwd = boxlwd) + segments(0, boxlimaux[2], boxlimaux[3], boxlimaux[2], + col = boxcol, lwd = boxlwd) + segments(boxlimaux[1], boxlimaux[4], 0, boxlimaux[4], + col = boxcol, lwd = boxlwd) + segments(0, boxlimaux[4], boxlimaux[3], boxlimaux[4], + col = boxcol, lwd = boxlwd) + segments(boxlimaux[1], boxlimaux[2], boxlimaux[1], + boxlimaux[4], col = boxcol, lwd = boxlwd) + segments(boxlimaux[3], boxlimaux[2], boxlimaux[3], + boxlimaux[4], col = boxcol, lwd = boxlwd) + } + else { + rect(boxlimaux[1], boxlimaux[2], boxlimaux[3], boxlimaux[4], + border = boxcol, col = NULL, lwd = boxlwd, lty = "solid") + } + } + if (!is.null(varu)) { + lontab <- InsertDim(lonb$x, 2, length(latb$x)) + lattab <- InsertDim(latb$x, 1, length(lonb$x)) + varplotu <- varu[lonb$ix, latb$ix] + varplotv <- varv[lonb$ix, latb$ix] + sublon <- seq(1, length(lon), arr_subsamp) + sublat <- seq(1, length(lat), arr_subsamp) + uaux <- lontab[sublon, sublat] + varplotu[sublon, sublat] * + 0.5 * arr_scale + vaux <- lattab[sublon, sublat] + varplotv[sublon, sublat] * + 0.5 * arr_scale + lenshaft <- 0.18 * arr_scale * arr_scale_shaft/(numbfig^(0.2)) + angleshaft <- 12 * arr_scale_shaft_angle/(numbfig^(0.2)) + arrows(lontab[sublon, sublat], lattab[sublon, sublat], + uaux, vaux, angle = angleshaft, length = lenshaft) + posarlon = lonb$x[1] + (lonmax - lonmin) * 0.1 + posarlat = latmin - (latmax - latmin) * 0.035 + arrows(posarlon, posarlat, posarlon + 0.5 * arr_scale * + arr_ref_len, posarlat, length = lenshaft, angle = angleshaft, + xpd = TRUE) + xpdsave = par()$xpd + par(xpd = NA) + text(x = posarlon + arr_scale, y = posarlat - (latmax - + latmin) * 0.03, labels = paste(as.character(arr_ref_len), + arr_units, sep = ""), cex = (1 * sizetit/numbfig^(0.2))) + par(xpd = xpdsave) + } + if (square & drawleg) { + par(mar = c(1.5, ymargin + 1.5, 2.5, ymargin2), mgp = c(1.5, + 0.3, 0), las = 1, cex = 1.2) + image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, + col = cols, xlab = "", ylab = "", main = units, cex.main = 1.1) + box() + axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg), + labels = brks[seq(1, length(brks), subsampleg)]) + } + if (!is.null(fileout)) + dev.off() + invisible(list(cols = cols, brks = brks)) +} \ No newline at end of file diff --git a/old/Rfunctions.R b/old/Rfunctions.R new file mode 100644 index 0000000000000000000000000000000000000000..0049023d3232870fb480334b7ba1f2fb77b320e6 --- /dev/null +++ b/old/Rfunctions.R @@ -0,0 +1,4077 @@ +# Things you might want to change + +# options(papersize="a4") +# options(editor="notepad") 
+# options(pager="internal")
+
+# set the default help type:
+# options(help_type="text")
+options(help_type="html")
+
+# your list with all R packages you want to load in memory at the beginning of every R session:
+my.packages.ic3<-c("sp","s2dverification","ncdf","ncdf4","statmod","maps","mapdata","parallel",
+   "doMC","RColorBrewer","MASS","maptools","scales","abind","reshape","reshape2","data.table","GEOmap")
+
+# "psych","plotrix"
+
+# GEOmap: for the dataset coastmap needed by PlotEquiDist
+
+############################ Script loaded automatically at startup #####################
+
+.First<-function(){
+  # load all my packages in memory at the beginning of the R session:
+  options(defaultPackages=c(getOption("defaultPackages"),my.packages.ic3))
+  cat("\nAll the optional libraries have been loaded\n")
+  print(paste("Current R session loaded from: ",commandArgs())) # print the name of the .RData file just opened
+
+  print(paste("Working dir is: ",getwd()))
+  print(system("free -m"))
+  print(gc())
+}
+
+
+################################### My R Aliases ############################################
+# aliases for commonly used functions:
+
+l <- function(x) length(x) # <- to quickly get the length of a vector
+w <- function() windows() # <- to open a new window (only works in Windows; in Linux the command is x11())
+qu <- function() quit("no") # <- to exit quickly from R typing only qu() instead of q("no")
+clear <- function() rm(list=ls()) # to remove all objects in the R session
+na <- function(x) length(which(is.na(x))) # find if there are NA values in the object and how many they are
+nna <- function(x) length(which(!is.na(x))) # find if there are non-NA values in the object and how many they are
+de <- function() dev.off() # to close a graphic window quickly
+pl <- function() plot.new() # to open an empty plot quickly
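+
+# Usage example (illustrative): the counting aliases at work on a vector with two missing values:
+#   na(c(1, NA, 3, NA))    # returns 2
+#   nna(c(1, NA, 3, NA))   # returns 2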
+my.palette2<-c("lightblue","white",brewer.pal(9, "YlOrRd")) #rev(brewer.pal(11, "RdYlBu")) +my.palette3<-brewer.pal(9, "Greens") # only 9 colors +my.palette4<-rev(brewer.pal(9, "BuPu")) +my.palette5<-brewer.pal(9, "Blues") +my.palette6<-brewer.pal(9, "Blues") +my.palette<-list(my.palette1,my.palette2,my.palette3,my.palette4,my.palette5,my.palette6) + +# Escala de colores para los mapas de tendencias +my.palette1.trend<-brewer.pal(9,"YlOrBr") +my.palette2.trend<-brewer.pal(9,"YlOrBr") +my.palette3.trend<-brewer.pal(9,"Greens") # Winkler's Index +my.palette4.trend<-brewer.pal(9,"YlOrBr") # rev(brewer.pal(11,"RdBu")) +my.palette5.trend<-rev(brewer.pal(11,"RdBu")) +my.palette6.trend<-rev(brewer.pal(11,"RdBu")) +my.palette.trend<-list(my.palette1.trend,my.palette2.trend,my.palette3.trend,my.palette4.trend,my.palette5.trend,my.palette6.trend) + +# others color palettes: +#my.palette7 <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + + +############################## My (short) Functions ####################################### + +# save in this function all the commands to download all the packages the first time you install R: +download.my.packages <- function(list.packages)for(p in 1:length(list.packages))install.packages(list.packages[p],repos="http://cran.univ-lyon1.fr") + +# Convert degrees to radians: +deg2rad<-function(deg) return(deg*pi/180) + +################################### My Functions ####################################### + +# function to show memory usage: +.ls.objects <- function (pos = 1, pattern, order.by, decreasing=FALSE, head=FALSE, n=5) { + napply <- function(names, fn) sapply(names, function(x) + fn(get(x, pos = pos))) + names <- ls(pos = pos, pattern = pattern) + obj.class <- napply(names, function(x) as.character(class(x))[1]) + obj.mode <- napply(names, mode) + obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class) + obj.size <- napply(names, object.size) + obj.dim <- t(napply(names, function(x) + as.numeric(dim(x))[1:2])) + vec <- is.na(obj.dim)[, 1] & (obj.type != "function") + obj.dim[vec, 1] <- napply(names, length)[vec] + out <- data.frame(obj.type, obj.size, obj.dim) + names(out) <- c("Type", "Size", "Rows", "Columns") + if (!missing(order.by)) + out <- out[order(out[[order.by]], decreasing=decreasing), ] + if (head) + out <- head(out, n) + return(out) +} + +# shorthand +lsos <- function(..., n=10) { + .ls.objects(..., order.by="Size", decreasing=TRUE, head=TRUE, n=n) + +} + +# create function to return matrix of memory consumption: +object.sizes <- function() +{ + return(rev(sort(sapply(ls(envir=.GlobalEnv), function (object.name) + object.size(get(object.name)))))) +} + +# function to resume memory use on Linux: +mem<-function(){ + print(system("free -m")) + #print(gc()) + print(lsos()) +} + +################################################################################################# +# Calendar functions # +################################################################################################# + +# vector with the month names: +my.month <- c("January","February","March","April","May","June","July","August","September","October","November","December") +my.month.short <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec") +my.month.short2 <- c("Ja", "Fe", "Ma", "Ap", "Ma", "Ju", "Ju", "Au", "Se", "Oc", "No", "De") +my.month.short3 <- my.month.very.short <- c("J","F","M","A","M","J","J","A","S","O","N","D") +my.season <- 
c("Winter", "Spring", "Summer", "Autumn") +my.period <- period.name <- c(my.month, my.season, "Yearly") + +endmonth<-function(day,month,year){ # day indica un giorno del mese di cui si vuole controllare se e' l'ultimo giorno del mese o meno: + last=FALSE + if(month==1 & day==31) last=TRUE; if(month==3 & day==31) last=TRUE; if(month==4 & day==30)last=TRUE; if(month==5 & day==31)last=TRUE + if(month==6 & day==30) last=TRUE; if(month==7 & day==31) last=TRUE; if(month==8 & day==31)last=TRUE; if(month==9 & day==30)last=TRUE + if(month==10 & day==31) last=TRUE; if(month==11 & day==30) last=TRUE; if(month==12 & day==31)last=TRUE + if(year%%400==0 | (year%%4==0 & year%%100!=0)){ # in questo caso l'anno e' bisestile) + if(month==2 & day==29) last=TRUE } else { # Anno NON bisestile: + if(month==2 & day==28) last=TRUE } + return(last) +} + +# simile alla precedente, restituisce qual'e'l'ultimo giorno del mese introdotto (ovvero il numero di giorni di quel mese) +lastday<-function(month,year){ + if(month==1 | month==3 | month==5 | month==7 | month==8 | month==10 | month==12) last=31 + if(month==4 | month==6 | month==9 | month==11) last=30 + if(month==2){ + if(year%%400==0 | (year%%4==0 & year%%100!=0)) {last=29} else {last=28} + } + #if(year==1 & month==2) last=28.25 # se si mette come anno il numero 1, restituisce il numero medio di giorni di febbraio quando si considerano tanti anni (28.25). Utile per calcolare le frequenze dei WTs del mese di febbraio. + return(last) +} + +# returns TRUE if the input year is a leap year: +leap.year <- function(year) return(ifelse((year%%4==0 & year%%100!=0) | year%%400==0, TRUE, FALSE)) + +# return the number of days of the input year: +n.days.in.a.year <- function(year) return(ifelse((year%%4==0 & year%%100!=0) | year%%400==0, 366, 365)) + +# return the number of days of the input month (1=Jan, 12=Dec): +# actually, it is only a wrapper of lastday(), that was kept for consistency +n.days.in.a.month <- function(month,year){ + return(lastday(month, year)) +} + +# like the previous function, but in case the input month is greater than 12, it enters into the next year: +ndm <- function(month,year){ + if(month < 13){ + return(lastday(month, year)) + } else { + return(lastday(month-12, year+1)) + } +} + +# return the number of days of the input season (1=Winter, 2=Spring, 3=Summer, 4=Autumn): +n.days.in.a.season <- function(season,year){ + return(length(pos.season(year,season))) +} + +# return the number of days of the input period (1-12: Jan-Dec, 13: winter, 14. spring, 15:summer, 16:autumn, 17: ear) +n.days.in.a.period <- function(period,year){ + if(period <= 12) return(n.days.in.a.month(period,year)) + if(period > 12 && period < 17) return(n.days.in.a.season(period-12,year)) + if(period == 17) return(n.days.in.a.year(year)) +} + +# get the total number of days from year.start to year.end: +n.days.in.a.yearly.period <- function(year.start, year.end){ + days.tot <- 0 + for(y in year.start:year.end){ + days.tot <- days.tot + n.days.in.a.year(y) + } + return(days.tot) +} + +# get the total number of days from month.start to month.end (included). 
+
+# get the total number of days from month.start to month.end (included);
+# if month.end is smaller than month.start, it considers month.end to be a month of the following year:
+n.days.in.a.monthly.period <- function(month.start, month.end, year){
+  days.tot <- 0
+  if(month.start <= month.end){
+    for(m in month.start:month.end) days.tot <- days.tot + n.days.in.a.month(m, year)
+  } else {
+    for(m in month.start:12) days.tot <- days.tot + n.days.in.a.month(m, year)
+    for(m in 1:month.end) days.tot <- days.tot + n.days.in.a.month(m, year+1)
+  }
+  return(days.tot)
+}
+
+# returns a sequence of 365 or 366 numbers whose values are the number of the month (1-12) each day of the year belongs to:
+seq.months.in.a.year<-function(year){
+  n.days.febr<-ifelse(n.days.in.a.year(year)==366,29,28)
+  return(c(rep(1,31),rep(2,n.days.febr),rep(3,31),rep(4,30),rep(5,31),rep(6,30),rep(7,31),rep(8,31),rep(9,30),rep(10,31),rep(11,30),rep(12,31)))
+}
+
+# returns a sequence of 365 or 366 numbers whose values are the day of the month each day of the year corresponds to:
+seq.days.in.a.year<-function(year){
+  n.days.febr<-ifelse(n.days.in.a.year(year)==366,29,28)
+  return(c(1:31, 1:n.days.febr, 1:31, 1:30, 1:31, 1:30, 1:31, 1:31, 1:30, 1:31, 1:30, 1:31))
+}
+
+# interval of days belonging only to the year y, but starting to count from the year year.start:
+seq.days.in.a.future.year <- function(year.start, y){
+  return(n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(y,y) + 1:n.days.in.a.year(y))
+}
+
+# number of days from year.start to year y, excluding year y:
+n.days.in.a.future.year <- function(year.start, y){
+  return(n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(y,y))
+}
+
+
+# vector with the numbers of the days of the input year belonging to the input month (1=January, 12=December):
+pos.month <- function(year,month){
+  month.len <- c(31, 28 + leap.year(year), 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) # number of days of each month
+  days.before <- sum(month.len[seq_len(month-1)]) # total days of the months preceding 'month'
+  return(days.before + 1:month.len[month])
+}
+
+# as pos.month, but it returns a vector with the days of the input year belonging to the months before the input month:
+pos.months.before <- function(year,month){
+  pos <- c()
+  if(month == 1) {
+    pos <- 0
+  } else {
+    for(m in 1:(month-1)){
+      pos <- c(pos, pos.month(year, m))
+    }
+  }
+
+  return(pos)
+}
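+
+# Usage examples (quick checks):
+#   pos.month(2014, 2)          # 32:59 (February of a non-leap year)
+#   pos.month(2016, 3)          # 61:91 (March starts one day later in a leap year)
+#   pos.months.before(2014, 3)  # 1:59  (all the days of January and February)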
+
+# as pos.month, but it also adds the days of the two adjacent months (the previous and the next one, wrapping around the year):
+pos.month.extended <- function(year,month){
+  month.prev <- ifelse(month == 1, 12, month - 1)
+  month.next <- ifelse(month == 12, 1, month + 1)
+  return(c(pos.month(year,month.prev), pos.month(year,month), pos.month(year,month.next)))
+}
+
+# as pos.month.extended, but it only adds the 15 closest days of each of the two adjacent months:
+pos.month.extended15 <- function(year,month){
+  month.prev <- ifelse(month == 1, 12, month - 1)
+  month.next <- ifelse(month == 12, 1, month + 1)
+  return(c(tail(pos.month(year,month.prev), 15), pos.month(year,month), pos.month(year,month.next)[1:15]))
+}
+
+# vector with the numbers of the days of the input year belonging to the input season (1=Winter, 4=Autumn): (Winter is Jan-Feb and Dec of the same year)
+pos.season <- function(year,season){
+  if(season==1) return(c(pos.month(year,1),pos.month(year,2),pos.month(year,12)))  # winter
+  if(season==2) return(c(pos.month(year,3),pos.month(year,4),pos.month(year,5)))   # spring
+  if(season==3) return(c(pos.month(year,6),pos.month(year,7),pos.month(year,8)))   # summer
+  if(season==4) return(c(pos.month(year,9),pos.month(year,10),pos.month(year,11))) # autumn
+}
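+
+# Usage examples (quick checks):
+#   pos.season(2014, 3)                    # 152:243 (all the days of June, July and August)
+#   length(pos.month.extended15(2014, 6))  # 60 (15 + 30 + 15 days centred on June)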
+
+# same as pos.month, but for period > 12 it returns the seasonal positions instead (13: winter, 14: spring, 15: summer, 16: autumn), or the whole-year interval for period = 17:
+pos.period <- function(year,period){
+  if(period <= 12) return(pos.month(year, period))
+  if(period > 12 && period < 17) return(pos.season(year, period-12))
+  if(period == 17) return(1:n.days.in.a.year(year))
+}
+
+
+
+# sequence of weekly startdates for the chosen year and start day/month:
+weekly.seq <- function(year,month,day){
+  yr1 <- year  # starting year of the weekly sequence
+  #yr2 <- year # in future you can create a sequence for more than one year
+  mes <- month # starting month (usually january)
+  #day<-2      # starting day
+
+  if(mes<10) {mes0 <- paste0(0,mes)} else {mes0 <- mes}
+  if(day<10) {day0 <- paste0(0,day)} else {day0 <- day}
+  sdates <- paste0(yr1,mes0,day0)
+  nday <- day
+  ndaysFebruary <- lastday(2,yr1)
+  ndays4month <- c(31,ndaysFebruary,31,30,31,30,31,31,30,31,30,31)
+
+  while (nday < 365-7) { # beware of leap years: 365 is used even for 366-day years!
+    day <- day+7
+    nday <- nday+7
+    if(day > ndays4month[mes]){
+      day <- day-ndays4month[mes]
+      mes=mes+1
+    }
+    if(mes < 10){ mes0 <- paste0(0,mes)} else {mes0 <- mes}
+    if(day < 10){ day0 <- paste0(0,day)} else {day0 <- day}
+    sdates <- c(sdates,paste0(yr1,mes0,day0))
+  }
+  return(sdates)
+}
+
+# return the positions inside weekly.seq of all the startdates whose month belongs to the chosen period:
+months.period <- function(year,mes,day,period){
+  sdates.seq <- weekly.seq(year,mes,day)
+  months.period <- list()
+
+  for(p in 1:12) months.period[[p]] <- which(as.numeric(substr(sdates.seq,5,6)) == p)
+
+  months.period[[13]] <- c(months.period[[1]],months.period[[2]],months.period[[12]])
+  months.period[[14]] <- c(months.period[[3]],months.period[[4]],months.period[[5]])
+  months.period[[15]] <- c(months.period[[6]],months.period[[7]],months.period[[8]])
+  months.period[[16]] <- c(months.period[[9]],months.period[[10]],months.period[[11]])
+
+  return(months.period[[period]])
+}
+
+#################################################################################################
+#                                     Graphic functions                                         #
+#################################################################################################
+
+# Like PlotEquiMap, but:
+# - with the option 'contours.col' to specify the colour of the contour lines,
+# - with the option 'cex.axis' to specify the size of the lat/lon tick numbers,
+# - with the option 'xlabel.dist' to specify the distance of the x labels from the x axis
+# - with the option 'contours.lty' to use a different line type for negative contour values!
+# - with the option 'contours.labels' not to draw the contour labels +# - with the option 'continents.col', the colors of the line of the continents, if filled.continents=FALSE (by default it is gray) +# +# PlotEquiMap(array(0,c(160,160)),1:160,-80:79) +# + +PlotEquiMap2<-function (var, lon, lat, toptitle = "", sizetit = 1, units = "", + brks = NULL, cols = NULL, square = TRUE, filled.continents = TRUE, + contours = NULL, brks2 = NULL, dots = NULL, axelab = TRUE, + labW = FALSE, intylat = 20, intxlon = 20, drawleg = TRUE, + subsampleg = 1, numbfig = 1, colNA = "white", contours.col = par("fg"), col_border = gray(0.5), + contours.lty = 1, contours.labels=TRUE, cex.lab = NULL, xlabel.dist = 1, continents.col = gray(0.5)) +{ + data(coastmap, envir = environment()) + dims <- dim(var) + if (length(dims) > 2) { + stop("Only 2 dimensions expected for var : (lon,lat) ") + } + if (dims[1] != length(lon) | dims[2] != length(lat)) { + if (dims[1] == length(lat) & dims[2] == length(lon)) { + var <- t(var) + dims <- dim(var) + } + else { + stop("Inconsistent var dimensions / longitudes + latitudes") + } + } + latb <- sort(lat, index.return = TRUE) + dlon <- lon[2:dims[1]] - lon[1:(dims[1] - 1)] + wher <- which(dlon > (mean(dlon) + 1)) + if (length(wher) > 0) { + lon[(wher + 1):dims[1]] <- lon[(wher + 1):dims[1]] - 360 + } + lonb <- sort(lon, index.return = TRUE) + latmin <- floor(min(lat)/10) * 10 + latmax <- ceiling(max(lat)/10) * 10 + lonmin <- floor(min(lon)/10) * 10 + lonmax <- ceiling(max(lon)/10) * 10 + colorbar <- colorRampPalette(c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen", "white", "white", "yellow", "orange", "red", "saddlebrown")) + if (is.null(brks) == TRUE) { + ll <- signif(min(var, na.rm = TRUE), 4) + ul <- signif(max(var, na.rm = TRUE), 4) + if (is.null(cols) == TRUE) { + cols <- colorbar(10) + } + nlev <- length(cols) + brks <- signif(seq(ll, ul, (ul - ll)/nlev), 4) + } + else { + if (is.null(cols) == TRUE) { + nlev <- length(brks) - 1 + cols <- colorbar(nlev) + } + else { + if (length(cols) != (length(brks) - 1)) { + stop("Inconsistent colour levels / list of colours") + } + } + } + if (is.null(brks2) == TRUE) { + if (is.null(contours)) { + if (square == FALSE) { + brks2 <- brks + contours <- var + } + } + else { + ll <- signif(min(contours, na.rm = TRUE), 2) + ul <- signif(max(contours, na.rm = TRUE), 2) + brks2 <- signif(seq(ll, ul, (ul - ll)/(length(brks) - + 1)), 2) + } + } + if (axelab == TRUE) { + ypos <- seq(latmin, latmax, intylat) + xpos <- seq(lonmin, lonmax, intxlon) + letters <- array("", length(ypos)) + letters[ypos < 0] <- "S" + letters[ypos > 0] <- "N" + ylabs <- paste(as.character(abs(ypos)), letters, sep = "") + letters <- array("", length(xpos)) + if (labW) { + nlon <- length(xpos) + xpos2 <- xpos + xpos2[xpos2 > 180] <- 360 - xpos2[xpos2 > 180] + } + letters[xpos < 0] <- "W" + letters[xpos > 0] <- "E" + if (labW) { + letters[xpos == 0] <- " " + letters[xpos == 180] <- " " + letters[xpos > 180] <- "W" + xlabs <- paste(as.character(abs(xpos2)), letters, sep = "") + } + else { + xlabs <- paste(as.character(abs(xpos)), letters, sep = "") + } + xmargin <- 1.2 - (numbfig^0.2) * 0.05 + ymargin <- 3 - (numbfig^0.2) + spaceticklab <- 1.3 - (numbfig^0.2) * 0.8 + topmargin <- 0.4 + ymargin2 <- 1.5 - (numbfig^0.2) * 0.9 + } + else { + xmargin <- 0.2 + ymargin <- 0.2 + switch(as.character(square), `FALSE` = 1.8, + 0) + topmargin <- 0.2 + spaceticklab <- 1 + ymargin2 <- 0.2 + } + + if (toptitle != "") topmargin <- 2.5 - (numbfig^0.2) * 0.6 + if (min(lon) < 0) { + 
continents <- "world" + } else { + continents <- "world2" + } + + if (square) { + if (drawleg) {layout(matrix(1:2, ncol = 1, nrow = 2), heights = c(5,1))} + + par(mar = c(xmargin, ymargin, topmargin, ymargin2), cex = 1.4,mgp = c(3, spaceticklab, 0), las = 0) + + if (colNA != "white") { + blanks <- array(0, dim = c(length(lonb$x), length(latb$x))) + image(lonb$x, latb$x, blanks, col = c(colNA), breaks = c(-1,1), main = toptitle, cex.main = (1.5/numbfig^(0.2))*sizetit, axes = FALSE, xlab = "", ylab = "") + flagadd <- TRUE + } + else {flagadd <- FALSE} + + image(lonb$x, latb$x, var[lonb$ix, latb$ix], col = cols, breaks = brks, main = toptitle, axes = FALSE, xlab = "", ylab = "", cex.main = (1.5/numbfig^(0.2)) * sizetit, add = flagadd) + + if (axelab == TRUE) { + if(is.null(cex.lab)) {my.cex <- 1/(numbfig^0.3)} else {my.cex <- cex.lab} + axis(2, at = ypos, labels = ylabs, cex.axis = my.cex, tck = -0.01) + axis(1, at = xpos, labels = xlabs, cex.axis = my.cex, tck = -0.01, mgp=c(3,xlabel.dist,0)) + } + + if (is.null(contours) == FALSE) { + if(contours.lty == 1){ + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2, method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = 1, col = contours.col, drawlabels=contours.labels) + } else { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 < 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = contours.lty, col = contours.col, drawlabels=contours.labels) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 == 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 3, lty = 1, col = contours.col, drawlabels=contours.labels) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], levels = brks2[which(brks2 > 0)], method = "edge", add = TRUE, labcex = 1/numbfig, lwd = 0.5/(numbfig^0.5), lty = 1, col = contours.col, drawlabels=contours.labels) + } + } + + map(continents, interior = FALSE, add = TRUE, lwd = 1, col=continents.col) + box() + } + else { + par(mar = c(xmargin + 5, ymargin + 1.5, topmargin, ymargin2), + cex.main = (1.6 * numbfig^(0.3)) * sizetit, cex.axis = 1.4, + cex.lab = 1.6, mgp = c(3, spaceticklab + 0.5, 0), + las = 0) + if (axelab == TRUE) { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + axis(2, at = ypos, labels = ylabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + axis(1, at = xpos, labels = xlabs, cex.axis = 1/(numbfig^0.3), + tck = -0.03) + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2,col = contours.col) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, col=continents.col) + }, key.title = title(main = units, cex.main = (1.2 * + numbfig^(0.3)) * sizetit)) + } + else { + filled.contour(lonb$x, latb$x, var[lonb$ix, latb$ix], + xlab = "", levels = brks, col = cols, ylab = "", + main = toptitle, key.axes = axis(4, brks[seq(1, + length(brks), subsampleg)], cex.axis = 1/(numbfig^0.3)), + plot.axes = { + contour(lonb$x, latb$x, contours[lonb$ix, latb$ix], + levels = brks2, method = "edge", add = TRUE, + labcex = 1, lwd = 2, col = contours.col) + map(continents, interior = FALSE, xlim = c(lonmin, + lonmax), ylim = c(latmin, latmax), add = TRUE, col=continents.col) + }, key.title = title(main = units, 
cex.main = (1.2 * numbfig^(0.3)) * sizetit))
+        }
+    }
+    if (is.null(dots) == FALSE) {
+        for (ix in 1:length(lon)) {
+            for (jy in 1:length(lat)) {
+                if (is.na(var[ix, jy]) == FALSE) {
+                    if (dots[ix, jy] == TRUE) {
+                        text(x = lon[ix], y = lat[jy], ".", cex = 12/(sqrt(sqrt(length(var))) * numbfig^0.5))
+                    }
+                }
+            }
+        }
+    }
+    if (square == TRUE & filled.continents == TRUE) {
+        if (min(lon) >= 0) {
+            ylat <- latmin:latmax
+            xlon <- lonmin:lonmax
+            proj <- setPROJ(1, LON0 = mean(xlon), LAT0 = mean(ylat), LATS = ylat, LONS = xlon)
+            coastmap$STROKES$col[which(coastmap$STROKES$col == "blue")] <- "white"
+            par(new = TRUE)
+            plotGEOmap(coastmap, PROJ = proj, border = "black", add = TRUE)
+            box()
+        }
+        else {
+            map(continents, interior = FALSE, wrap = TRUE, lwd = 0.7, col = gray(0.5), fill = TRUE, add = TRUE, border = col_border)
+        }
+    }
+    if (square & drawleg) {
+        par(mar = c(1.5, ymargin + 1.5, 2.5, ymargin2), mgp = c(1.5, 0.3, 0), las = 1, cex = 1.2)
+        image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols, xlab = "", ylab = "", main = units, cex.main = 1.1)
+        box()
+        axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg), labels = brks[seq(1, length(brks), subsampleg)])
+    }
+}
+
+
+# Draw points over a PlotEquiMap:
+map.points<-function (x, country = "", label = NULL, minpop = 0,
+    maxpop = Inf, capitals = 0, cex = par("cex"), projection = FALSE,
+    parameters = NULL, orientation = NULL, pch = 1, ...)
+{
+
+    usr <- par("usr")
+    if (!missing(projection) && projection != FALSE) {
+        if (require(mapproj)) {
+            if (is.character(projection)) {
+                projx <- mapproject(x$long, x$lat, projection = projection,
+                  parameters = parameters, orientation = orientation)
+            }
+            else {
+                if (nchar(.Last.projection()$projection) > 0) {
+                  projx <- mapproject(x$long, x$lat)
+                }
+                else stop("No projection defined\n")
+            }
+            x$long <- projx$x
+            x$lat <- projx$y
+        }
+        else stop("mapproj package not available\n")
+    }
+    else {
+        if (usr[2] > (180 + 0.04 * (usr[2] - usr[1])))
+            x$long[x$long < 0] <- 360 + x$long[x$long < 0]
+    }
+    selection <- x$long >= usr[1] & x$long <= usr[2] & x$lat >=
+        usr[3] & x$lat <= usr[4] & (x$pop >= minpop & x$pop <=
+        maxpop) & ((capitals == 0) | (x$capital >= 1))
+    if (is.null(label))
+        label <- sum(selection) < 20
+    cxy <- par("cxy")
+    # subsets of 'selection' by city type; they were left undefined in the original code,
+    # so they are reconstructed here assuming the 'capital' codes of maps::world.cities
+    # (0 = non-capital, 1 = capital, 2 = provincial/state capital, 3 = local capital):
+    selection0 <- selection & (x$capital == 0)
+    selection1 <- selection & (x$capital == 1)
+    selection2 <- selection & (x$capital == 2)
+    selection3 <- selection & (x$capital == 3)
+    selection01 <- selection0 | selection1
+    if (sum(selection01) > 0)
+        points(x$long[selection01], x$lat[selection01], pch = pch,
+            cex = cex * 0.6, ...)
+    if (sum(selection0) > 0)
+        if (label)
+            text(x$long[selection0], x$lat[selection0] + cxy[2] *
+                cex * 0.7, paste(" ", x$name[selection0], sep = ""),
+                cex = cex * 0.7, ...)
+    if (sum(selection1) > 0) {
+        points(x$long[selection1], x$lat[selection1], pch = pch,
+            cex = cex, ...)
+        text(x$long[selection1], x$lat[selection1] + cxy[2] *
+            cex, paste(" ", x$name[selection1], sep = ""), cex = cex *
+            1.2, ...)
+    }
+    if (sum(selection2) > 0) {
+        points(x$long[selection2], x$lat[selection2], pch = pch,
+            cex = cex, ...)
+        text(x$long[selection2], x$lat[selection2] + cxy[2] *
+            cex * 1.1, paste(" ", x$name[selection2], sep = ""),
+            cex = cex * 1.1, ...)
+    }
+    if (sum(selection3) > 0) {
+        points(x$long[selection3], x$lat[selection3], pch = pch,
+            cex = cex, ...)
+        text(x$long[selection3], x$lat[selection3] + cxy[2] *
+            cex * 0.9, paste(" ", x$name[selection3], sep = ""),
+            cex = cex * 0.9, ...)
+    }
+    invisible()
+}
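+
+# Usage example (illustrative; 'world.cities' is the city data frame shipped with the maps package,
+# whose columns long/lat/pop/capital/name are the ones map.points() expects):
+#   library(maps); data(world.cities)
+#   map('world', interior = FALSE)
+#   map.points(world.cities, minpop = 3e6, pch = 19, col = 'red')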
+
+
+# Taylor diagram (modified from function taylor.diagram of package plotrix to have the same colors as Nube's taylor diagram)
+# you can also specify a text label for each point, and colour each point according to its bias
+# (still missing: bias legend)
+# the 'gamma' levels refer to the RMSE curves!
+my.taylor<-function (ref, model, add = FALSE, col = "red", pch = 19, pos.cor = TRUE,
+    xlab = "", ylab = "", main = "Taylor Diagram", show.gamma = TRUE,
+    ngamma = 3, gamma.col = "darkgreen", sd.arcs = 0, ref.sd = FALSE, sd.method = "sample",
+    grad.corr.lines = c(0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99), pcex = 1, cex.axis = 1,
+    normalize = FALSE, mar = c(5, 4, 6, 6), BIAS = FALSE, my.text = NULL, text.cex = pcex, RMSE.label = FALSE, ...)
+{
+    grad.corr.full <- c(0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99, 1)
+    R <- cor(ref, model, use = "na.or.complete")
+
+    if(BIAS==TRUE){
+      BIAS <- mean(model-ref,na.rm=TRUE)
+      my.equidist<-c(-75,-35,-15,-5,5,15,35,75)
+      my.colors<-c("magenta4","blue4","steelblue","skyblue2","orange","orangered","red","red4")
+      #my.colorscale<-rev(my.colors(8))
+      my.col<-my.colors[which.min(abs(my.equidist-BIAS))]
+      #my.labels<-c("0-30%","41-50","41-50","51-70%","61-70%","71-80%","81-90%","91-100%")
+      #my.cuts<-c(-100,-50,-20,-10,0,10,20,50,100)
+    }
+
+    if (is.list(ref))
+        ref <- unlist(ref)
+    if (is.list(model))
+        model <- unlist(model)
+    SD <- function(x, subn) {
+        meanx <- mean(x, na.rm = TRUE)
+        devx <- x - meanx
+        ssd <- sqrt(sum(devx * devx, na.rm = TRUE)/(length(x[!is.na(x)]) -
+            subn))
+        return(ssd)
+    }
+    subn <- sd.method != "sample"
+    sd.r <- SD(ref, subn)
+    sd.f <- SD(model, subn)
+    if (normalize) {
+        sd.f <- sd.f/sd.r
+        sd.r <- 1
+    }
+    maxsd <- 1.5 * max(sd.f, sd.r)
+    oldpar <- par("mar", "xpd", "xaxs", "yaxs")
+    if (!add) {
+        # plot for positive correlations only:
+        if (pos.cor) {
+            if (nchar(ylab) == 0)
+                ylab = "Standard deviation"
+            par(mar = mar)
+            plot(0, xlim = c(0, maxsd), ylim = c(0, maxsd), xaxs = "i",
+                yaxs = "i", axes = FALSE, main = main, xlab = xlab,
+                ylab = ylab, type = "n", cex = cex.axis, ...)
+ if (grad.corr.lines[1]) { + for (gcl in grad.corr.lines) lines(c(0, maxsd * + gcl), c(0, maxsd * sqrt(1 - gcl^2)), lty = 3,col="blue") + } + segments(c(0, 0), c(0, 0), c(0, maxsd), c(maxsd, + 0),col="blue") + axis.ticks <- pretty(c(0, maxsd),n=6) + axis.ticks <- axis.ticks[axis.ticks <= maxsd] + axis(1, at = axis.ticks, cex.axis = cex.axis) + axis(2, at = axis.ticks, cex.axis = cex.axis) + if (sd.arcs[1]) { + if (length(sd.arcs) == 1) + sd.arcs <- axis.ticks + for (sdarc in sd.arcs) { + xcurve <- cos(seq(0, pi/2, by = 0.03)) * sdarc + ycurve <- sin(seq(0, pi/2, by = 0.03)) * sdarc + lines(xcurve, ycurve, col = "black", lty = 3) + } + } # if there is more than one curve for the st.dev: + if (show.gamma[1]) { + if (length(show.gamma) > 1) + gamma <- show.gamma + else gamma <- pretty(c(0, maxsd), n = ngamma)[-1] # [-1] for removing the first value of 0.0 + if (gamma[length(gamma)] > maxsd) + gamma <- gamma[-length(gamma)] + labelpos <- seq(45, 70, length.out = length(gamma)) + for (gindex in 1:length(gamma)) { + xcurve <- cos(seq(0, pi, by = 0.03)) * gamma[gindex] + + sd.r + endcurve <- which(xcurve < 0) + endcurve <- ifelse(length(endcurve), min(endcurve) - + 1, 105) + ycurve <- sin(seq(0, pi, by = 0.03)) * gamma[gindex] + maxcurve <- xcurve * xcurve + ycurve * ycurve + startcurve <- which(maxcurve > maxsd * maxsd) + startcurve <- ifelse(length(startcurve), max(startcurve) + + 1, 0) + lines(xcurve[startcurve:endcurve], ycurve[startcurve:endcurve], + col = gamma.col) + if (xcurve[labelpos[gindex]] > 0) + boxed.labels(xcurve[labelpos[gindex]], ycurve[labelpos[gindex]], + gamma[gindex], border = FALSE,cex=1, col=gamma.col) + } + } + xcurve <- cos(seq(0, pi/2, by = 0.01)) * maxsd + ycurve <- sin(seq(0, pi/2, by = 0.01)) * maxsd + lines(xcurve, ycurve) # external semicircle + bigtickangles <- acos(seq(0.1, 0.9, by = 0.1)) + medtickangles <- acos(seq(0.05, 0.95, by = 0.1)) + smltickangles <- acos(seq(0.91, 0.99, by = 0.01)) + segments(cos(bigtickangles) * maxsd, sin(bigtickangles) * + maxsd, cos(bigtickangles) * 0.97 * maxsd, sin(bigtickangles) * + 0.97 * maxsd) # external mayor ticks + par(xpd = TRUE) + if (ref.sd) { + xcurve <- cos(seq(0, pi/2, by = 0.01)) * sd.r + ycurve <- sin(seq(0, pi/2, by = 0.01)) * sd.r + lines(xcurve, ycurve) + } + points(sd.r, 0, cex = pcex) + text(cos(c(bigtickangles, acos(c(0.95, 0.99)))) * + 1.05 * maxsd, sin(c(bigtickangles, acos(c(0.95, + 0.99)))) * 1.05 * maxsd, c(seq(0.1, 0.9, by = 0.1), + 0.95, 0.99), col="blue") # correlation numbers + text(maxsd * 0.8, maxsd * 0.8, "Correlation", srt = 315, col="blue") + segments(cos(medtickangles) * maxsd, sin(medtickangles) * + maxsd, cos(medtickangles) * 0.98 * maxsd, sin(medtickangles) * + 0.98 * maxsd) + segments(cos(smltickangles) * maxsd, sin(smltickangles) * + maxsd, cos(smltickangles) * 0.99 * maxsd, sin(smltickangles) * + 0.99 * maxsd) + } + else { # plot in case correlations can be negative or positive (pos.cor=FALSE): + x <- ref + y <- model + R <- cor(x, y, use = "pairwise.complete.obs") + E <- mean(x, na.rm = TRUE) - mean(y, na.rm = TRUE) + xprime <- x - mean(x, na.rm = TRUE) + yprime <- y - mean(y, na.rm = TRUE) + sumofsquares <- (xprime - yprime)^2 + Eprime <- sqrt(sum(sumofsquares)/length(complete.cases(x))) + E2 <- E^2 + Eprime^2 + if (add == FALSE) { + maxray <- 1.5 * max(sd.f, sd.r) + plot(c(-maxray, maxray), c(0, maxray), type = "n", + asp = 1, bty = "n", xaxt = "n", yaxt = "n", + xlab = xlab, ylab = ylab, main = main, cex = cex.axis) + discrete <- seq(180, 0, by = -1) + listepoints <- NULL + for (i in 
discrete) {
+                  listepoints <- cbind(listepoints, maxray * cos(i * pi/180), maxray * sin(i * pi/180))
+                }
+                listepoints <- matrix(listepoints, 2, length(listepoints)/2)
+                listepoints <- t(listepoints)
+                lines(listepoints[, 1], listepoints[, 2])
+                lines(c(-maxray, maxray), c(0, 0))
+                lines(c(0, 0), c(0, maxray))
+                for (i in grad.corr.lines) {
+                  lines(c(0, maxray * i), c(0, maxray * sqrt(1 - i^2)), lty = 3, col="blue")
+                  lines(c(0, -maxray * i), c(0, maxray * sqrt(1 - i^2)), lty = 3, col="blue")
+                }
+                for (i in grad.corr.full) {
+                  text(1.05 * maxray * i, 1.05 * maxray * sqrt(1 - i^2), i, cex = cex.axis)
+                  text(-1.05 * maxray * i, 1.05 * maxray * sqrt(1 - i^2), -i, cex = cex.axis)
+                }
+                seq.sd <- seq.int(0, 2 * maxray, by = (maxray/10))[-1]
+                for (i in seq.sd) {
+                  xcircle <- sd.r + (cos(discrete * pi/180) * i)
+                  ycircle <- sin(discrete * pi/180) * i
+                  for (j in 1:length(xcircle)) {
+                    if ((xcircle[j]^2 + ycircle[j]^2) < (maxray^2)) {
+                      points(xcircle[j], ycircle[j], col = "darkgreen", pch = ".")
+                      if (j == 10)
+                        text(xcircle[j], ycircle[j], signif(i, 2), cex = cex.axis, col = "darkgreen")
+                    }
+                  }
+                }
+                seq.sd <- seq.int(0, maxray, length.out = 5)
+                for (i in seq.sd) {
+                  xcircle <- (cos(discrete * pi/180) * i)
+                  ycircle <- sin(discrete * pi/180) * i
+                  if (i)
+                    lines(xcircle, ycircle, lty = 3, col = "blue")
+                  text(min(xcircle), -0.03 * maxray, signif(i, 2), cex = cex.axis, col = "blue")
+                  text(max(xcircle), -0.03 * maxray, signif(i, 2), cex = cex.axis, col = "blue")
+                }
+                text(0, -0.08 * maxray, "Standard Deviation", cex = cex.axis, col = "blue")
+                text(0, -0.12 * maxray, "Centered RMSE", cex = cex.axis, col = "darkgreen")
+                points(sd.r, 0, pch = 22, bg = "darkgreen", cex = 1.1)
+                text(0, 1.1 * maxray, "Correlation Coefficient", cex = cex.axis)
+            }
+            S <- (2 * (1 + R))/(sd.f + (1/sd.f))^2
+        } # close if on 'pos.cor'
+    }
+
+    if(BIAS==TRUE){
+      points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = my.col, cex = pcex)
+    } else {
+      points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = col, cex = pcex)
+    }
+
+    # label of the point, drawn only if 'my.text' is given; you can change the pos argument to your liking:
+    if(length(my.text) > 0) text(sd.f * R, sd.f * sin(acos(R)), labels=my.text, cex = text.cex, pos=3)
+    if(RMSE.label==TRUE) text(0.81, 0.14, "RMSE", srt = 45, cex=1, col=gamma.col)
+
+    text(1, 0.04, "ERA-Interim", srt = 0, cex=1, col="darkgray") # hardcoded label of the reference dataset
+
+    invisible(oldpar)
+}
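+
+# Usage example (illustrative; boxed.labels() from the plotrix package must be available for the RMSE arcs):
+#   ref <- rnorm(120)
+#   mod <- ref + rnorm(120, sd = 0.5)
+#   my.taylor(ref, mod, col = "red", my.text = "model A", RMSE.label = TRUE)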
+
+
+# draw a north arrow (a 16-point compass rose) of radius 'size' centred at the coordinates 'loc':
+north.arrow <- function(loc,size,bearing=0,cols,cex=1,...)
+{
+  # checking arguments
+  if(missing(loc)) stop("loc is missing")
+  if(missing(size)) stop("size is missing")
+  # default colors are white and black
+  if(missing(cols)) cols <- rep(c("white","black"),8)
+  # calculating coordinates of polygons
+  radii <- rep(size/c(1,4,2,4),4)
+  x <- radii[(0:15)+1]*cos((0:15)*pi/8+bearing)+loc[1]
+  y <- radii[(0:15)+1]*sin((0:15)*pi/8+bearing)+loc[2]
+  # drawing polygons
+  for (i in 1:15) {
+    x1 <- c(x[i],x[i+1],loc[1])
+    y1 <- c(y[i],y[i+1],loc[2])
+    polygon(x1,y1,col=cols[i])
+  }
+  # drawing the last polygon
+  polygon(c(x[16],x[1],loc[1]),c(y[16],y[1],loc[2]),col=cols[16])
+  # drawing letters
+  b <- c("E","N","W","S")
+  for (i in 0:3) text((size+par("cxy")[1])*cos(bearing+i*pi/2)+loc[1],
+                      (size+par("cxy")[2])*sin(bearing+i*pi/2)+loc[2],b[i+1],
+                      cex=cex)
+}
+
+# function that takes an array and lowers all the elements higher than val.max to the value val.max (useful to adjust the legends of the plots):
+rescale.max <- function(my.array,val.max){
+  ss <- which(my.array > val.max)
+  my.array[ss] <- val.max - 0.000000001
+  return(my.array)
+}
+
+rescale.min <- function(my.array,val.min){ # as rescale.max, but for the values smaller than val.min, which are raised to val.min
+  ss <- which(my.array < val.min)
+  my.array[ss] <- val.min + 0.0000000001
+  return(my.array)
+}
+
+rescale <- function(my.array,val.min,val.max){ # combines rescale.max and rescale.min
+  ss <- which(my.array > val.max)
+  my.array[ss] <- val.max - 0.0000000001
+  ss <- which(my.array < val.min)
+  my.array[ss] <- val.min + 0.0000000001 # the 0.0000000001 is just to be able to draw a color with PlotEquiMap (otherwise it draws the color for NA)
+  return(my.array)
+}
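+
+# Usage example (illustrative): clamp a field into the range of the colour breaks before mapping it
+#   field <- matrix(rnorm(100, mean = 5, sd = 4), 10, 10)
+#   range(rescale(field, 0, 10))  # now strictly inside (0, 10), so no value falls outside the legend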
+
+ColorBarV <- function(brks, cols = NULL, vert = TRUE, subsampleg = 1,
+                      cex = 1, marg = NULL) {
+  # Creates a horizontal or vertical colorbar to introduce in multipanels.
+  #
+  # Args:
+  #   brks: Levels.
+  #   cols: List of colours, optional.
+  #   vert: TRUE/FALSE for vertical/horizontal colorbar.
+  #   subsampleg: Subsampling factor of the interval between ticks on colorbar.
+  #     Default: 1 = every level
+  #   cex: Multiplicative factor to increase the ticks size, optional.
+  #   marg: margins
+  #
+  # Returns:
+  #   This function returns nothing
+  #
+  # History:
+  #   1.0  #  2012-04  (V. Guemas, vguemas@ic3.cat)  #  Original code
+  #   1.1  #  2014-11  (C. Prodhomme, chloe.prodhomme@ic3.cat)  #  add cex option
+  #
+  #
+  # Input arguments
+  # ~~~~~~~~~~~~~~~~~
+  #
+  if (is.null(cols) == TRUE) {
+    nlev <- length(brks) - 1
+    cols <- rainbow(nlev)
+  } else {
+    if (length(cols) != (length(brks) - 1)) {
+      stop("Inconsistent colour levels / list of colours")
+    }
+  }
+
+  #
+  # Plotting colorbar
+  # ~~~~~~~~~~~~~~~~~~~
+  #
+  if (vert) {
+    if (is.null(marg) == FALSE){
+      par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2)
+    }else{
+      par(mar = c(1, 1, 1, 1.5 *( 1 + cex)), mgp = c(1, 1, 0), las = 1, cex = 1.2)
+    }
+    image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, col = cols,
+          xlab = '', ylab = '')
+    box()
+    axis(4, at = seq(0.5, length(brks) - 0.5, subsampleg), tick = TRUE,
+         labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex)
+  } else {
+    if (is.null(marg) == FALSE){
+      par(mar = marg, mgp = c(1, 1, 0), las = 1, cex = 1.2)
+    }else{
+      par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3,0.8*(cex-0.625))), 0),
+          las = 1, cex = 1.2)
+    }
+
+    image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols,
+          xlab = '', ylab = '')
+    box()
+    axis(1, at = seq(0.5, length(brks) - 0.5, subsampleg),
+         labels = brks[seq(1, length(brks), subsampleg)], cex.axis = cex)
+  }
+}
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: remove the grid points above a certain value (argument 'level') [and below '-level' if two.sides=TRUE] that happen to lie in areas with few points above that value.
+# Useful to remove from a contour plot all the small spots of significant points that we don't want to contour.
+# To do so, just apply this function inside the option 'contour' of 'PlotEquiMap' to remove the significant points (they are set to the value of 0).
+# Argument 'size' determines the side of the square (in grid points) used to check whether there are enough grid points with values above 'level' near
+# the chosen point. Increasing it will increase the number of grid points deleted, leaving only the bigger spots of points above the chosen value.
+# I/O: a 2D lat/lon grid in geographic coordinates +# Assumptions: none +# Branch: general +# Example: +# data <- matrix(runif(48000,0,1)^2,300,160) + matrix(c(rep(0,20000),rep(0.6,3000),rep(0,25000)),300,160) +# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred")) +# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=data, brks2=0.6) +# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE) +# PlotEquiMap_colored(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE, contours.col="blue", continents.col="gray40") + +grid2contour <- function(grid, level, two.sides=FALSE, size=10){ + nrows <- dim(grid)[1] + ncols <- dim(grid)[2] + radius <- round(size/2) + + grid.weighted <- matrix(NA, nrows, ncols) + + grid.expanded <- rbind(cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]),cbind(grid,grid,grid),cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)])) + + if(two.sides==FALSE){ + for(i in 1:nrows){ + for(j in 1:ncols){ + grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level) + } + } + } else { + for(i in 1:nrows){ + for(j in 1:ncols){ + grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level) + sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] < -level) + } + } + } + + n.points.min <- (2*radius+1)^2*0.3 # 30% of the total points in the square + ss <- which(grid.weighted < n.points.min) + grid[ss] <- 0 + return(grid) +} + +###################################################################### +# +# RELIABILITY DIAGRAM FOR A COLLECTION OF PROBABILITY FORECASTS # +# Veronica: Like ReliabilityDiagram() function, but +# Modified to include in the outputs the hist.counts +# +###################################################################### + +ReliabilityDiagramHist <- function(probs, obs, bins=10, nboot=500, + plot=FALSE, plot.refin=TRUE, mc.cores=1, + cons.probs=c(0.025, 0.975)) + { + #print("RD") + # + # Plot reliability diagram for a probability forecast + # + # Usage: ReliabilityDiagram(probs, obs, nbins, nboot) + # + # Arguments: + # + # probs ... vector of length N, probs[k] has the predicted probability for + # the event obs[k] + # obs ... obs[k] = 1 if the event happened at instance k, obs[k] = 0 + # otherwise + # bins ... either scalar: number of equidistant bins to discretize the + # forecast probabilities, + # or a vector: user-defined breakpoints of the bins; the `hist` + # function will produce errors if these are not valid + # nboot ... number of bootstrap resamples for estimating consistency bars + # if nboot==0, no resampling is done and NAs are returned as + # consistency bars + # plot ... boolean; whether to plot the reliability diagram + # plot.refin ... boolean; whether to plot the small refinement histogram + # in lower right corner + # cons.probs ... a 2-vector, lower and upper confidence limit + # mc.cores ... 
number of cores for resampling (if > 1, library `multicore` + # is required) + # + # Return value: + # + # a data frame of K+1 rows with the following columns: + # + # * p.avgs ... in-bin averages of the forecast probabilities + # * cond.probs ... observed conditional frequency of event, given i + # * cbar.lo ... lower limit consistency of consistency bar[i], as specified by user + # * cbar.hi ... upper limit consistency of consistency bar[i], as specified by user + # + # Author: + # + # Stefan Siegert + # s.siegert@exeter.ac.uk + # December 2013 + # + # Example: + # + # N <- 1000 + # p <- rbeta(N, 1, 3) + # y <- rbinom(N, 1, p) + # rd <- rel.diag(p, y, plot=TRUE) + # print(rd) + # + # + # change log: + # + # 2013/12/02 + # * manual definition of bin-breaks + # * manual definition of consistency intervals + # * sanity checks + # * multicore option for resampling + # + # 2013/10/31: + # * return summary data as data frame + # * added options `plot` and `plot.refin` + # + # 2013/08/20: + # * points are plotted at in-bin-averages, not at bin centres + # * legend has been removed + # * consistency bars have been added, calculated by a resampling technique + # * see Broecker (2007) http://dx.doi.org/10.1175/WAF993.1 for details + # * the bars are pointwise 2.5% ... 97.5% intervals around the hypothesis of reliability + # * dependency on package "verification" was removed + # + # Author: Stefan Siegert + # + # based on previous version by Caio Coelho and the routine + # reliability.plot.default of the R-package `verification` + # + + + # sanity checks + if (class(probs) == "data.frame") { + probs <- c(as.matrix(probs)) + } + if (class(obs) == "data.frame") { + obs <- c(as.matrix(obs)) + } + stopifnot(length(probs) == length(obs)) + stopifnot(nboot >= 0, mc.cores >= 0) + stopifnot(all(probs >= 0), all(probs <= 1), all(obs %in% c(0,1))) + stopifnot(length(cons.probs) == 2, all(cons.probs >= 0), all(cons.probs <= 1)) + # optional use of multicore without warning message + warn <- getOption("warn") + options(warn=-1) + if(require(multicore, quietly=TRUE)) { + mclapply <- multicore::mclapply + } else { + mclapply <- function(..., mc.cores) lapply(...) 
+ } + options(warn=warn) + + # some definitions and corrections + n <- length(obs) + mc.cores <- floor(mc.cores) + nboot <- floor(nboot) + cons.probs <- sort(cons.probs) + + + ############################################# + # reliability analysis + ############################################# + # estimate refinement function + if (length(bins) == 1) { + nbins <- floor(bins) + brx <- seq(0, 1, length.out=nbins+1) + + c(-.1, rep(0, nbins-1), .1) + } else { + nbins <- length(bins) - 1 + bins <- sort(bins) + stopifnot(min(bins)<= 0 & max(bins) >= 1) + brx <- bins + } + h <- hist(probs, breaks=brx, plot=FALSE)$counts +#print(h) +#print(sum(h)) +#print(probs) +#print(sum(probs)) + p <- sum(probs) + #print(sum(probs)/sum(h)) + + # estimate calibration function + g <- hist(probs[obs==1], breaks=brx, plot=FALSE)$counts +#print(g) +#print(sum(g)) + obar.i <- g / h +#print(obar.i) + no_res <- sum(g)/sum(h) + #print(no_res) + obar.i[ is.nan(obar.i) ] <- NA + + # calculate in-bin averages + p.bins <- as.numeric(cut(probs, breaks=brx, include.lowest=TRUE)) + p.avgs <- sapply(seq(nbins), + function(ii) mean(probs[p.bins == ii], na.rm=TRUE)) + p.avgs[ is.nan(p.avgs) ] <- NA + +# +#print(p.avgs) +# vertline <- sum(p.avgs,na.rm = TRUE)/bins +#print(vertline) + + ############################################# + # consistency resampling (broecker and smith 2007) + ############################################# + if (nboot) { + resamp.mat <- matrix(nrow=0, ncol=nbins) + # the resampling function + sample.rel.diag <- function(dummy=0) { + p.hat <- sample(x=probs, size=n, replace=TRUE) + x.hat <- rbinom(n=n, size=1, prob=p.hat) + hh <- hist(p.hat, breaks=brx, plot=FALSE)$counts + gg <- hist(p.hat[x.hat==1], breaks=brx, plot=FALSE)$counts + return(gg / hh) + } + # multicore? 
+ if (mc.cores > 1) { + l <- mclapply(1:nboot, sample.rel.diag, mc.cores=mc.cores) + resamp.mat <- do.call(rbind, l) + } else { + l <- replicate(nboot, sample.rel.diag()) + resamp.mat <- t(l) + } + cons.bars <- apply(resamp.mat, 2, + function(z) quantile(z, cons.probs, na.rm=TRUE)) + } else { + cons.bars <- matrix(NA, ncol=nbins, nrow=2) + } + + + ############################################# + # plot the reliability diagram + ############################################# + if (plot) { + # reliability plot + old.par <- par(no.readonly = TRUE) + on.exit(par(old.par)) + plot(NULL, xlim = c(0,1), ylim = c(0,1), + xlab="Forecast probability", + ylab="Observed relative frequency") + # consistency bars + for (i in 1:length(p.avgs)) { + lines(rep(p.avgs[i], 2), cons.bars[, i], col="#CCCCCC", lwd=6) + } + # reliability points and diagonal + points(p.avgs, obar.i, col = "black", pch = 1, lwd=2, type="b") + lines(c(0,1), c(0,1), lty=1) + if (plot.refin) { + # refinement histogram in lower corner + pp<- par("plt") + par("plt" = c(pp[2] - 0.2 , pp[2], pp[3], pp[3]+ 0.2) ) + par(new = TRUE) + barplot(h, axes = FALSE, axisnames = FALSE) + axis(4) + box() + } + } + + ############################################# + # return data + ############################################# + ret.df <- data.frame(p.avgs=p.avgs, cond.probs=obar.i, + cbar.lo=cons.bars[1,], cbar.hi=cons.bars[2,],hist.counts=h, obs.counts=g, for.prob=p) + return(ret.df) + } + + +# to plot the reliability diagrams for both upper and below tercile at the same time: +ReliabilityDiagram2 <-function(rel_diag,nbins=10,consbars=F,tit=NULL,colLine=NULL,colBar=NULL,marHist=T,hist_ylim=NULL,Lg=NULL) { + + # print("Plot") +# rel_diag<-rd # output of ReliabilityDiagramHist() +# nbins=10 +# consbars=T +# colLine=col_line +# colBar=col_bar +# tit=tit1 +# marHist=T +# hist_ylim=c(0,100) +# x11(width=12,height=10) + # x11() + # PLOT OF THE RELIABILITY DIAGRAM + # + ###################################################################################### + # rd: a list with the reliability diagrams that will be represented in the same plot + # cons.bars : if the consistency bar must be represented or not. 
+  # nbins : number of equidistant points used to compute the reliability diagram (optional)
+  # tit: the title of the plot (optional)
+  # brierScores: the Brier score linked to the reliability diagram (optional)
+  # marHist: whether the small refinement histogram is shown or not
+  #####################################################################################
+
+  # Some parameters are defined
+  nrd <- length(rel_diag) # nrd = 5, 4 models + mme
+  rg <- list()
+
+  # Check the dimensions of the reliability diagrams
+  for (i in 1:nrd){
+    if (dim(rel_diag[[i]])[1]!=nbins){
+      stop ('Each reliability diagram must have exactly nbins bins')
+    }
+    rg[[i]]<-range(rel_diag[[i]]$hist.counts) # check the range of the histograms
+  }
+
+  if (is.null(hist_ylim)){
+    rgH<-range(rg)
+#print(rgH)
+  }else{
+    rgH<-hist_ylim
+  }
+
+
+  ##########################################
+  # reliability plot
+  # par(mar=c(5,3,2,2)+0.1)
+  ##########################################
+
+  layout(matrix(c(rep(1,nrd),seq(2,(nrd+1))),nrd,2,byrow=F),width=c(5,2))
+  par(oma=c(2.5,4,5,1))
+  #layout.show(a)
+
+  # The axes are defined
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  #x11(width=12,height=10)
+# old.par <- par(no.readonly = TRUE)
+#print(old.par)
+# on.exit(par(old.par))
+# par(mar=c(5,5,5,0))
+  old.par <- par(mar=c(5,5,5,0))
+  on.exit(par(old.par))
+
+  plot(NULL, xlim = c(0,1), ylim = c(0,1),axes=F, xlab='', ylab='')
+
+  axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=2.0)
+  title(xlab= "Forecast probability",line=3.9,cex.lab=2.0)
+
+  axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), las=2,cex.axis=2.0)
+  #axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), cex.axis=2.0)
+  box()
+  title(ylab= "Observed relative frequency", line=0.2,cex.lab=2.0,outer=T)
+  if(is.null(tit)==F){
+# title(tit,cex.main=4,outer=T,line=-1)
+# title(tit,cex.main=2.0,outer=T,line=-4)
+    title(tit,cex.main=2.0,outer=T,line=-3)
+  }
+
+  # Legend
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  yloc <- c(1.0, 0.95, 0.90, 0.85, 0.80)
+  legend(0.,yloc[1], legend=Lg[[1]], fill=colLine[[1]], bty="n", cex=1.2)
+  legend(0.,yloc[2], legend=Lg[[2]], fill=colLine[[2]], bty="n", cex=1.2)
+  legend(0.,yloc[3], legend=Lg[[3]], fill=colLine[[3]], bty="n", cex=1.2)
+  legend(0.,yloc[4], legend=Lg[[4]], fill=colLine[[4]], bty="n", cex=1.2)
+  legend(0.,yloc[5], legend=Lg[[5]], fill=colLine[[5]], bty="n", cex=1.2)
+# legend("topleft", "(x,y)", pch = 1, title = "topleft, inset = .05", inset = .05)
+
+  # No resolution and No skill lines
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  no_res <- sum(rel_diag[[1]]$obs.counts)/sum(rel_diag[[1]]$hist.counts)
+# vt_res <- sum(rel_diag[[1]]$for.prob)/sum(rel_diag[[1]]$hist.counts)
+#print(paste("no_res = ",no_res))
+  numb <- c(seq(0,1,by=0.1))
+#print(numb)
+  no_skill <- (numb+no_res)/2.
+#print(no_skill)
+
+# diagonal line
+  lines(c(0,1), c(0,1), lty=1)
+# no_resolution line
+  lines(c(0,1), c(no_res,no_res), col="gray", lty=3)
+  lines(c(1/3,1/3), c(0,1), col="gray", lty=3)
+# lines(c(vt_res,vt_res), c(0,1), col="gray", lty=3)
+# lines(c(no_res,no_res), c(0,1), col="gray", lty=3)
+# no_skill line
+  lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3)
+
+
+  # Consistency bars
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  HI <- matrix(NA, nrow=nrd, ncol=length(rel_diag[[1]]$hist.counts))
+
+  for (j in 1:nrd){ # nrd = 5: 4 models + mme
+    HI[j,] <- rel_diag[[j]]$hist.counts
+    if (consbars==T){
+      # The lower limit of consistency bar i and the upper limit are combined in one list
+      consBars<-list()
+      consBars[[j]]<-abind(InsertDim(rel_diag[[j]]$cbar.lo,1,1),InsertDim(rel_diag[[j]]$cbar.hi,1,1),along=1)
+
+      # plot consistency bars
+      for (i in 1:nbins){ # nbins = 10 bins
+#       lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=3)
+        lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=2) # lwd: line width
+      }
+    }
+
+# see plot: "p" for points, "l" for lines, "b" for both points and lines, "c" for empty points joined by lines, "o" for overplotted points and lines, "s" and "S" for stair steps and "h" for histogram-like vertical lines. Finally, "n" does not produce any points or lines.
+    points(rel_diag[[j]]$p.avgs, rel_diag[[j]]$cond.probs, type="b", pch=1 , col =colLine[[j]], cex=2.0 , lwd=3)
+
+  }
+
+
+  # Number of forecasts
+  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  if (marHist==TRUE){
+
+    for (i in 1:nrd){
+# par(mar=c(5,0,5,12))
+# par(mar=c(1.5,1.5,6,7)) # in case the number of sharpness diagrams is 3 or 4
+      par(mar=c(0.5,1.5,5,7)) # in case the number of sharpness diagrams is 5
+      barplot(HI[i,]/10000, beside=T,space=c(0,1.2),axes = F, axis.lty=F, axisnames = F, col = colLine[[i]], ylim=rgH/10000)
+# axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=1.5)
+      title(main = "# of forecasts (x10⁴)", font.main = 1.0, line=0.5)
+# grid(1,5,col='#525252')
+      axis(4,cex.axis=1.0)
+      box(bg='grey')
+    }
+    #pp<- par("plt")
+    #par("plt" = c(pp[2] - 0.14 , pp[2], pp[3], pp[3]+ 0.15) )
+    #par(new = TRUE)
+  }
+
+}
+
+
+#################################################################################################
+#                                      Interpolation                                            #
+#################################################################################################
+
+# function that returns the position of the nearest grid point from a point of coordinates lat, lon;
+# the grid is represented by the two vectors lat.grid and lon.grid.
+# If there is more than one point at the same minimum distance, it returns only the position of the first one:
+nearest <- function(lat,lon,lat.grid,lon.grid){
+  n.lat=length(lat.grid)
+  n.lon=length(lon.grid)
+
+  if(lon<0 && min(lon.grid>=0)) lon=360+lon # convert the negative longitude lon of the point to a positive one, if lon.grid has only positive values
+
+  #lat.grid2<-sort(lat.grid,decreasing=T) # sort latitudes because they must go from the higher number to the lower one
+  #lon.grid2<-sort(lon.grid) # sort longitudes because they must go from the lower number to the highest one
+
+  grid.dist.lat<-matrix((lat.grid-lat)^2, nrow=n.lat, ncol=n.lon+1, byrow=FALSE)
+  #grid.dist.lon<-matrix((lon.grid-lon)^2,nrow=n.lat,ncol=n.lon,byrow=TRUE)
+  grid.dist.lon<-matrix((c(lon.grid,lon.grid[1]+360)-lon)^2,nrow=n.lat,ncol=n.lon+1,byrow=TRUE) # add also the first lon point at the end of the lon values
+
+  grid.dist<-(grid.dist.lat+grid.dist.lon)^0.5 # matrix of distances from the point, in degrees
+  nearest.grid.point.pos<-which(grid.dist==min(grid.dist),arr.ind=T)
+
+  if(length(nearest.grid.point.pos)>2) nearest.grid.point.pos <- nearest.grid.point.pos[1,] # remove all the points at the same minimum distance beyond the first one
+
+  if(nearest.grid.point.pos[2] > n.lon) nearest.grid.point.pos[2] <- 1 # in case the closest point is the duplicated first longitude value, its lon position must be reset to 1
+
+  return(nearest.grid.point.pos)
+}
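+
+# Usage example (illustrative regular 1-degree grid): index of the grid cell closest to Barcelona (41.39N, 2.17E)
+#   nearest(41.39, 2.17, lat.grid = seq(90, -90, -1), lon.grid = seq(0, 359, 1))  # lat index 50, lon index 3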
+
+
+# interpolates the observed values in grid2 on grid1 with the bilinear method, but using great-circle distances (Haversine formula) and selecting only the 4 nearest grid points.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees. Grid2 format must be: [lat,lon]
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# It interpolates at least 2 grids at a time (i.e. the monthly grids of a model), so it can use the same distance matrix for all the grids (the most computationally intensive part of the script)
+# and the same weights for each monthly grid; grid2 must have the format: [layer,lat,lon], where 'layer' stands for day, month, year, season, etc...
+# beware that the 2 rows of points closest to the north and south poles are still not well interpolated, because they are not yet associated to grid points on the other side of the pole
+bilinear<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2){
+  R<-6371 # earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1) # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  nearest.lat<-array(NA,c(n.lat1,2)) # latitude of the two closest grid points
+  nearest.lat.pos<-array(NA,c(n.lat1,2)) # position inside grid2 of the two closest points in the latitudinal sense
+  for(l in 1:n.lat1){
+    pos.first<-which.min(abs(lat2.list-lat1.list[l]))
+    lat2.list.temp<-lat2.list
+    lat2.list.temp[pos.first]=10000000000
+    pos.second<-which.min(abs(lat2.list.temp-lat1.list[l]))
+    nearest.lat[l,1]=lat2.list[pos.first]
+    nearest.lat[l,2]=lat2.list[pos.second]
+    nearest.lat.pos[l,1]=pos.first
+    nearest.lat.pos[l,2]=pos.second
+  }
+
+  # correct the longitude values if one of the two grids has only positive longitude values and the other also negative ones:
+  if(length(which(lon1.list<0))>0 && length(which(lon2.list<0))==0){lon2.list[which(lon2.list>180)]<-lon2.list[which(lon2.list>180)]-360}
+  if(length(which(lon1.list<0))==0 && length(which(lon2.list<0))>0){lon2.list[which(lon2.list<0)]<-lon2.list[which(lon2.list<0)]+360}
+
+  nearest.lon<-array(NA,c(n.lon1,2)) # longitude of the two closest grid points
+  nearest.lon.pos<-array(NA,c(n.lon1,2)) # position inside grid2 of the two closest points in the longitudinal sense
+  for(l in 1:n.lon1){
+    pos.first<-which.min(abs(lon2.list-lon1.list[l]))
+    lon2.list.temp<-lon2.list
+    lon2.list.temp[pos.first]=10000000000
+    pos.second<-which.min(abs(lon2.list.temp-lon1.list[l]))
+    # points of grid1 to the left of all the points of grid2 are also associated to the rightmost points of grid2, because the longitudes wrap around the globe:
+    if(lon1.list[l] < lon2.list[pos.first] && lon1.list[l] < lon2.list[pos.second]) pos.second<-n.lon2
+    # points of grid1 to the left of all points of grid2 are associated also with the rightmost points of grid2, because longitude wraps around:
+    if(lon1.list[l] < lon2.list[pos.first] && lon1.list[l] < lon2.list[pos.second]) pos.second<-n.lon2
+    # points of grid1 to the right of all points of grid2 are associated also with the leftmost points of grid2, because longitude wraps around:
+    if(lon1.list[l] > lon2.list[pos.first] && lon1.list[l] > lon2.list[pos.second]) pos.second<-1
+
+    nearest.lon[l,1]=lon2.list[pos.first]
+    nearest.lon[l,2]=lon2.list[pos.second]
+    nearest.lon.pos[l,1]=pos.first
+    nearest.lon.pos[l,2]=pos.second
+  }
+
+  lat1.list.rad<-deg2rad(lat1.list)
+  lon1.list.rad<-deg2rad(lon1.list)
+  nearest.lat.rad<-deg2rad(nearest.lat)
+  nearest.lon.rad<-deg2rad(nearest.lon)
+
+  pred<-array(NA,c(n.layers,n.lat1,n.lon1))
+  for(y in 1:n.lat1){
+    for(x in 1:n.lon1){
+      # (the same coordinates in degrees are available in nearest.lat / nearest.lon, if needed for debugging)
+
+      # conversion to rad:
+      lat<-lat1.list.rad[y] # lat and lon of the selected grid1 point
+      lon<-lon1.list.rad[x]
+      lat1<-nearest.lat.rad[y,1] # lat and lon of its 4 nearest points
+      lon1<-nearest.lon.rad[x,1]
+      lat2<-nearest.lat.rad[y,1]
+      lon2<-nearest.lon.rad[x,2]
+      lat3<-nearest.lat.rad[y,2]
+      lon3<-nearest.lon.rad[x,1]
+      lat4<-nearest.lat.rad[y,2]
+      lon4<-nearest.lon.rad[x,2]
+      latN<-c(lat1,lat2,lat3,lat4)
+      lonN<-c(lon1,lon2,lon3,lon4)
+
+      distN<-sqrt((sin((lat-latN)/2))^2 + cos(lat)*cos(latN)*(sin((lon-lonN)/2))^2)
+      distN<-Rx2 * asin(pmin(distN,1)) # distance in km of the 4 nearest points
+
+      ss<-which(distN==0)
+      weights<-distN^2
+      weights[ss]<-0.000000000001 # to avoid +Inf in the denominator of the weight matrix when a distance is zero
+      weights=1/weights
+      sum.weights<-sum(weights)
+
+      for(l in 1:n.layers){
+        value1<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,1]] # grid2 values of its 4 nearest points
+        value2<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,2]] # for layer l
+        value3<-grid2[l,nearest.lat.pos[y,2],nearest.lon.pos[x,1]]
+        value4<-grid2[l,nearest.lat.pos[y,2],nearest.lon.pos[x,2]]
+        valueN<-c(value1,value2,value3,value4)
+
+        values.weighted<-valueN * weights
+        sum.values.weighted<-sum(values.weighted)
+        pred[l,y,x]<-sum.values.weighted/sum.weights
+        if(sum.weights==0) pred[l,y,x]=NA # if the 4 points are so far away that they carry no weight; but I think this never happens with this method
+      }
+    }
+  }
+
+  # note: returning the per-layer list below gave wrong values (possible R issue), so the full array is returned instead:
+  #interp<-list()
+  #for(l in 1:n.layers) interp[[l]]<-pred[l,,]
+  #return(interp)
+  return(pred)
+}
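+
+# A minimal usage sketch of bilinear() (not part of the original script), assuming the deg2rad()
+# helper used by this file is available; grid2 has format [layer,lat,lon]:
+#
+# lat.coarse <- seq(80, -80, by = -20); lon.coarse <- seq(0, 350, by = 10)
+# lat.fine <- seq(75, -75, by = -5); lon.fine <- seq(0, 355, by = 5)
+# field <- array(rnorm(2 * length(lat.coarse) * length(lon.coarse)),
+#                c(2, length(lat.coarse), length(lon.coarse))) # 2 layers (e.g. 2 months)
+# interp <- bilinear(lat.fine, lon.fine, lat.coarse, lon.coarse, field)
+# dim(interp) # [2, 31, 72]: same layers, now on the fine grid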
+
+
+# interpolates the observed values of grid2 on grid1, assigning to each point of grid1 the value of the closest point of grid2
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees. grid2 format must be: [layer,lat,lon]
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# It interpolates at least two grids at a time (i.e.: the monthly grids of a model), so it can reuse the same distance matrix for all grids (the most computationally intensive part of the script)
+# and the same weights for each monthly grid; the layer dimension stands for day, month, year, season, etc.
+# beware that it does not work well at lon=0 and it doesn't compute distances with the great-circle formula!!!
+closer<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2){
+  R<-6371 # earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1) # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  nearest.lat<-array(NA,c(n.lat1,2)) # latitude of the closest grid point
+  nearest.lat.pos<-array(NA,c(n.lat1,2)) # position inside grid2 of the closest point in the latitudinal sense
+  for(l in 1:n.lat1){
+    pos.first<-which.min(abs(lat2.list-lat1.list[l]))
+    nearest.lat[l,1]=lat2.list[pos.first]
+    nearest.lat.pos[l,1]=pos.first
+  }
+
+  # correct longitude values if one of the two grids has positive longitude values only and the other also has negative ones:
+  if(length(which(lon1.list<0))>0 && length(which(lon2.list<0))==0){lon2.list[which(lon2.list>180)]<-lon2.list[which(lon2.list>180)]-360}
+  if(length(which(lon1.list<0))==0 && length(which(lon2.list<0))>0){lon2.list[which(lon2.list<0)]<-lon2.list[which(lon2.list<0)]+360}
+
+  nearest.lon<-array(NA,c(n.lon1,2)) # longitude of the closest grid point
+  nearest.lon.pos<-array(NA,c(n.lon1,2)) # position inside grid2 of the closest point in the longitudinal sense
+  for(l in 1:n.lon1){
+    pos.first<-which.min(abs(lon2.list-lon1.list[l]))
+    nearest.lon[l,1]=lon2.list[pos.first]
+    nearest.lon.pos[l,1]=pos.first
+  }
+
+  #lat1.list.rad<-deg2rad(lat1.list)
+  #lon1.list.rad<-deg2rad(lon1.list)
+  #nearest.lat.rad<-deg2rad(nearest.lat)
+  #nearest.lon.rad<-deg2rad(nearest.lon)
+
+  pred<-array(NA,c(n.layers,n.lat1,n.lon1))
+  for(y in 1:n.lat1){
+    for(x in 1:n.lon1){
+      #lat<-lat1.list.rad[y] # lat and lon of the selected grid1 point
+      #lon<-lon1.list.rad[x]
+
+      #latN<-nearest.lat.rad[y,1] # lat and lon of its nearest point
+      #lonN<-nearest.lon.rad[x,1]
+
+      #distN<-sqrt((sin((lat-latN)/2))^2 + cos(lat)*cos(latN)*(sin((lon-lonN)/2))^2)
+      #distN<-Rx2 * asin(pmin(distN,1)) # distance in km of the nearest point
+
+      for(l in 1:n.layers){
+        pred[l,y,x]<-grid2[l,nearest.lat.pos[y,1],nearest.lon.pos[x,1]] # grid2 value of its nearest point for layer l
+      }
+    }
+  }
+
+  interp<-list()
+  for(l in 1:n.layers) interp[[l]]<-pred[l,,]
+  return(interp)
+}
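+
+# A minimal usage sketch of closer() (not part of the original script), reusing the toy grids of the
+# bilinear() example above; note that closer() returns a list with one matrix per layer:
+#
+# nn <- closer(lat.fine, lon.fine, lat.coarse, lon.coarse, field)
+# length(nn)   # 2 (one element per layer)
+# dim(nn[[1]]) # [31, 72]: nearest-neighbour values on the fine grid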
+
+
+# interpolates the observed values of grid2 on grid1 with the IDW method, using great-circle distances (Haversine formula) and selecting only the grid points closer than dmax km.
+# lat1.list and lon1.list are vectors of coordinates referring to grid1, while lat2.list and lon2.list refer to grid2. They must be in degrees. grid2 format must be: [layer,lat,lon]
+# lat2.list and lon2.list can also be in a different order than lat1.list and lon1.list.
+# it returns NA if, for a given point of grid1, there are no points of grid2 closer than dmax km that can be used to interpolate that point.
+# It interpolates at least two grids at a time (i.e.: the monthly grids of a model), so it can reuse the same distance matrix for all grids (the most computationally intensive part of the IDW)
+# and the same weights for each monthly grid; the layer dimension stands for day, month, year, season, etc.
+# This function is fully vectorized, allowing the fastest possible interpolation of grids with any number of points. It uses up to ~8 GB of RAM, but memory use can
+# be decreased by setting a lower value of the variable max.memory defined below.
+multi.idw<-function(lat1.list,lon1.list,lat2.list,lon2.list,grid2,dmax){
+  max.memory<-125000000 # maximum number of elements in a matrix (each element is a double and occupies 8 bytes; the maximum size of a matrix in R is 2.1 GB, about 250000000 elements of type double)
+  R<-6371 # earth mean radius (km)
+  Rx2<-2*R
+
+  n.lat1=length(lat1.list)
+  n.lon1=length(lon1.list)
+  n.lat2=length(lat2.list)
+  n.lon2=length(lon2.list)
+  n.points1<-as.double(n.lat1*n.lon1) # number of points of grid1
+  n.points2<-as.double(n.lat2*n.lon2)
+  n.points.tot<-n.points1*n.points2
+  n.layers<-dim(grid2)[1]
+
+  # longitude conversion:
+  #if(min(lon1)<0 && min(lon2)>=0) lon1=360+lon1 # convert the negative longitudes of the first grid to positive ones, if the second grid has only positive longitudes
+  #if(min(lon2)<0 && min(lon1)>=0) lon2=360+lon2 # convert the negative longitudes of the second grid to positive ones, if the first grid has only positive longitudes
+
+  #lat.grid1<-sort(lat.grid1,decreasing=T) # sort latitudes from the highest value to the lowest one to simulate a spatial grid
+  #lon1<-sort(lon1) # sort longitudes from the lowest value to the highest one to simulate a spatial grid
+
+  #lat.grid2<-sort(lat.grid2,decreasing=T) # sort latitudes from the highest value to the lowest one to simulate a spatial grid
+  #lon2<-sort(lon2) # sort longitudes from the lowest value to the highest one to simulate a spatial grid
+
+  n.max1<-floor(max.memory/n.points2) # maximum number of points of grid1 that can be used in a matrix of ~1 GB (1 point = 1 double = 8 bytes)
+  if(n.points1<=n.max1) {
+    n.int<-1
+    n.points1.int<-n.points1
+    n.points1.last.int<-n.points1
+    print("grid fits into memory")
+  } else { # in this case, cut grid1 horizontally into smaller grids to work with matrices of ~1 GB
+    n.int<-floor(n.points1/n.max1)+1
+    n.points1.int<-n.max1
+    n.points1.last.int<-n.points1 %% n.max1 # number of points of the last interval
+    print(paste("grid will be split in",n.int,"subgrids"))
+  }
+
+  lat1<-rep(lat1.list,each=n.lon1)
+  lon1<-rep(lon1.list,n.lat1)
+  lat1.rad<-deg2rad(lat1)
+  lon1.rad<-deg2rad(lon1)
+  cos.lat1<-cos(lat1.rad)
+  vert1<-cbind(lat=lat1,lon=lon1,lat.deg=lat1.rad,lon.deg=lon1.rad,cos.lat=cos.lat1) # list of lat and lon of the points of grid1, with their values in radians and their cosines too
+
+  lat2<-rep(lat2.list,each=n.lon2)
+  lon2<-rep(lon2.list,n.lat2)
+  lat2.rad<-deg2rad(lat2)
+  lon2.rad<-deg2rad(lon2)
+  cos.lat2<-cos(lat2.rad)
+  vert2<-cbind(lat=lat2,lon=lon2,lat.deg=lat2.rad,lon.deg=lon2.rad,cos.lat=cos.lat2) # list of lat and lon of the points of grid2, with their values in radians and their cosines too
+
+  pred<-matrix(NA,nrow=n.points1,ncol=n.layers) # matrix with the interpolated values for each layer
+
+  for(i in 1:n.int){
+    imax<-i*n.max1
+    if(i==n.int) imax=n.points1
+    imin<-1+(i-1)*n.max1
+    if(i==n.int && n.int>1) n.points1.int=n.points1.last.int
+
+    lat1.rad.int<-lat1.rad[imin:imax]
+    lat2.rad.int<-lat2.rad
+    lon1.rad.int<-lon1.rad[imin:imax]
+    lon2.rad.int<-lon2.rad
+    cos.lat1.int<-cos.lat1[imin:imax]
+    cos.lat2.int<-cos.lat2
+
+    print("Calculating distance matrix...")
+
+    vert.cos.lat1<-matrix(cos.lat1.int,nrow=n.points1.int,ncol=n.points2)
+    vert.cos.lat2<-matrix(cos.lat2.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.cos.lat = vert.cos.lat1 * vert.cos.lat2
+    rm(vert.cos.lat1,vert.cos.lat2);gc()
+
+    vert.lon1<-matrix(lon1.rad.int,nrow=n.points1.int,ncol=n.points2)
+    vert.lon2<-matrix(lon2.rad.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.lon <- (vert.lon2 - vert.lon1)/2
+    rm(vert.lon1,vert.lon2);gc()
+
+    vert.lon.squared<-vert.lon^2
+    rm(vert.lon);gc()
+
+    vert.mult<-vert.cos.lat * vert.lon.squared
+    rm(vert.cos.lat,vert.lon.squared);gc()
+
+    vert.lat1<-matrix(lat1.rad.int,nrow=n.points1.int,ncol=n.points2)
+    vert.lat2<-matrix(lat2.rad.int,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+    vert.lat <- (vert.lat2 - vert.lat1)/2
+    rm(vert.lat1,vert.lat2);gc()
+
+    vert.lat.squared <- vert.lat^2
+    rm(vert.lat);gc()
+
+    vert.latlon <- vert.lat.squared + vert.mult
+    rm(vert.lat.squared,vert.mult);gc()
+
+    vert.root<-sqrt(vert.latlon)
+    rm(vert.latlon);gc()
+
+    mm<-which(vert.root > 1) # clip rounding errors above 1 before taking the arcsine
+    if(length(mm)>0) vert.root[mm]<-1
+    rm(mm);gc()
+
+    vert.dist <- Rx2 * asin(vert.root) # matrix of distances in km
+    rm(vert.root);gc()
+
+    ss<-which(vert.dist==0)
+    weights<-vert.dist^2
+    weights[ss]<-0.000000000001 # to avoid +Inf in the denominator of the weight matrix when a distance is zero
+    weights=1/weights
+    rm(ss);gc()
+
+    nn<-which(vert.dist > dmax)
+    weights[nn]<-0 # set to 0 the weights of the points of grid2 that are too far away to be used in the IDW
+    sum.weights<-rowSums(weights)
+    zz<-which(sum.weights==0)
+    rm(vert.dist,nn);gc()
+
+    for(l in 1:n.layers){
+      # put the grid2 values in each row of a matrix, following the lat/lon list of grid2 points, but horizontally:
+      horiz2<-as.vector(t(grid2[l,,]))
+      horiz2.rep<-rep(horiz2,n.points1.int)
+
+      vert2.values.int<-matrix(horiz2.rep,nrow=n.points1.int,ncol=n.points2,byrow=TRUE)
+      values.weighted<-vert2.values.int * weights
+      sum.values.weighted<-rowSums(values.weighted)
+
+      pred[imin:imax,l]<-sum.values.weighted/sum.weights
+      if(length(zz)>0) pred[imin-1+zz,l]<-NA # points of grid1 with no grid2 neighbour within dmax are set to NA
+      rm(values.weighted,sum.values.weighted,vert2.values.int,horiz2,horiz2.rep); #gc()
+
+      print(paste("subgrid:",i,"/",n.int," layer:",l,"/",n.layers))
+    }
+  }
+
+  rm(zz,weights,sum.weights);gc()
+
+  interp<-list()
+  for(l in 1:n.layers) interp[[l]]<-matrix(pred[,l],nrow=n.lat1,ncol=n.lon1,byrow=TRUE)
+
+  return(interp)
+}
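+
+# A minimal usage sketch of multi.idw() (not part of the original script): interpolate the same toy
+# field with IDW, using only grid2 points closer than dmax = 1000 km; fine-grid points with no
+# coarse-grid neighbour within dmax come back as NA:
+#
+# idw <- multi.idw(lat.fine, lon.fine, lat.coarse, lon.coarse, field, dmax = 1000)
+# length(idw); dim(idw[[1]]) # 2 layers, each [31, 72]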
+
+################################################################################
+#                             Regression functions                             #
+################################################################################
+
+# Modified version of the base R function lm.fit(), sped up by removing some unnecessary outputs.
+# From R version 2.15 onwards it cannot be used any more; use lm.fit.fast instead.
+# Beware that, as with lm.fit, you must also pass an initial column of ones to represent the intercept term!!!
+lm.fit.fast.old<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+    z <- .Fortran("dqrls", qr = x, n = n, p = p, y = y, ny = ny,
+        tol = as.double(tol), coefficients = mat.or.vec(p, ny),
+        residuals = y, effects = y, rank = integer(1L), pivot = 1L:p,
+        qraux = double(p), work = double(2 * p), PACKAGE = "base")
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
+
+# Modified version of the base R function lm.fit(), sped up by removing some unnecessary outputs (such as the QR decomposition).
+# Remember that the first column of the x matrix must be a column of ones, to represent the intercept term!
+# After R version 2.15, .Fortran() calls to base routines are forbidden and must be replaced with .Call();
+# therefore lm.fit.fast must be called instead of lm.fit.fast.old:
+lm.fit.fast<-function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+
+    z <- .Call(stats:::C_Cdqrls, x, y, tol, TRUE)
+
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
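+
+# A minimal usage sketch of lm.fit.fast() (not part of the original script); remember to prepend a
+# column of ones to the design matrix to represent the intercept:
+#
+# set.seed(1)
+# x <- cbind(1, rnorm(100))       # intercept column + one predictor
+# y <- 2 + 3 * x[,2] + rnorm(100)
+# fit <- lm.fit.fast(x, y)
+# fit$coefficients                # close to c(2, 3), the same result as lm.fit(x, y)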
+
+# Function to plot the bar chart of the anomalies of a time series of frequencies (in % by default, but the scale can be changed with the freq.max option),
+# using red for anomalies above the climatological value of the frequencies (the mean frequency of the variable over the study period)
+# and blue for anomalies below it. It also handles possible NAs in the time series (the corresponding bar is not plotted).
+# It also adds the linear fit, if it is found to be significant with the Mann-Kendall test.
+barplot.freq <- function(time.serie, year.start, year.end, p.value = 0.05, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), ...)
+{
+  x <- time.serie
+  n.years <- length(x)
+  years.serie <- year.start:year.end
+  # m is the value used to separate positive anomalies from negative ones, i.e.: the climatology over the whole period
+  m <- mean(x, na.rm=TRUE)
+  bar3 <- pmax(x - m, 0)
+  bar2 <- pmax(m - x, 0)
+  bar1 <- m - bar2
+  bar1[is.na(bar1)] <- m # in case there are NAs
+  bar2[is.na(bar2)] <- 0 # in case there are NAs
+  bar3[is.na(bar3)] <- 0 # in case there are NAs
+
+  bar.matrix <- matrix(c(bar1,bar2,bar3), 3, n.years, byrow=T)
+  barplot(bar.matrix, col = c("white","blue","red"), border = NA, names.arg = years.serie, ylim = c(0,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp)
+
+  abline(0,0, col="black") # add a black x-axis line
+
+  # add the mean frequency of the simulated ensemble mean time series:
+  # text(length(years.serie)/3, freq.max*(29/30),labels=bquote(bar(nu) == .(paste0(round(mean(x),1),"%"))),cex=3)
+
+  #z <- lm(x ~ years)
+  kendall_pvalue <- MannKendall(x)$sl[1]
+
+  # add a line with the linear trend, but only if it is significant:
+  #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+  if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+}
+
+## Alternative, but without bars:
+## library(lattice) # for xyplot
+## library(grid) # for adding a different color for negative values in xyplot
+## x<-1:100
+## y<-4+rnorm(100)
+## x <- zoo(4+rnorm(100))
+## xyplot(x, grid=TRUE, panel = function(x, y, ...){
+##   panel.xyplot(x, y, col="blue", ...)
+##   #panel.abline(h=0, col="black")
+##   panel.abline(h=4, col="gray")
+##   grid.clip(y=unit(4,"native"),just=c("bottom"))
+##   panel.xyplot(x, y, col="red", ...)
+## })
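+
+# A minimal usage sketch of barplot.freq() (not part of the original script); it needs MannKendall()
+# from the 'Kendall' package for the trend test:
+#
+# library(Kendall)
+# set.seed(2)
+# freq <- pmax(0, 0.3 + cumsum(rnorm(35, sd = 0.02))) # synthetic yearly frequencies, 1980-2014
+# barplot.freq(freq, 1980, 2014, title = "WT frequency", freq.max = 0.8)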
+
+# as barplot.freq, but for forecasts:
+barplot.freq.sim <- function(time.serie, time.serie.max, time.serie.min, time.serie.obs, year.start, year.end, p.value = 0.05, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), col.bar = c("white","gray70"), col.line="gray50", cex.mean = 1, cex.obs = 1, cex.r = 1.5, ...)
+{
+  x <- time.serie
+  y <- time.serie.max
+  z <- time.serie.min
+  o <- time.serie.obs
+
+  n.years <- length(x)
+  years.serie <- year.start:year.end
+  # m is the value used to separate positive anomalies from negative ones, i.e.: the climatology over the whole period
+  m <- mean(x, na.rm=TRUE)
+
+  bar1 <- z
+  bar2 <- y
+  bar1[is.na(bar1)] <- m # in case there are NAs
+  bar2[is.na(bar2)] <- 0 # in case there are NAs
+
+  bar.matrix <- matrix(c(bar1,bar2), 2, length(years.serie), byrow=T)
+  my.bar <- barplot(bar.matrix, col = col.bar, border = NA, names.arg = years.serie, ylim = c(0,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp)
+
+  abline(m,0, col=col.line) # add a horizontal line at the climatological mean
+
+  # add points with the ensemble mean:
+  col.sim <- rep("red",year.end-year.start+1)
+  col.sim[which(x < m)] <- "blue"
+  points(my.bar, x, type="p", pch=20, xlab="", ylab="", col=col.sim, cex=cex.mean)
+
+  # add crosses with the observed values:
+  col.obs <- rep("red",year.end-year.start+1)
+  col.obs[which(o < m)] <- "blue"
+  points(my.bar, o, type="p", pch=4, xlab="", ylab="", col=col.obs, cex=cex.obs, lwd=5)
+
+  # add the correlation between the observed time series and the ensemble mean time series:
+  corr <- round(cor(x,o, use="complete.obs"),2)
+  text(length(years.serie), freq.max*(19/20), labels=paste0("r= ",corr), cex=cex.r)
+  #text(c(length(years.serie-5),length(years.serie)),rep(freq.max*(9/10),2), labels=c(round(o,2),corr))
+
+  #z <- lm(x ~ years)
+  kendall_pvalue <- MannKendall(x)$sl[1]
+
+  # add a line with the linear trend, but only if it is significant:
+  #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+  if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+}
+
+# as barplot.freq, but for forecasts:
+barplot.freq.sim2 <- function(time.serie, time.serie.max, time.serie.min, time.serie.obs, year.start, year.end, p.value = 0.05, freq.min=0, freq.max = 0.8, title=NULL, cex.y = 1, cex.x = 1, ylab="%", mgp=c(1,1,0), col.bar = c("white","gray85","#4393c3","#d6604d","gray80"), col.line="gray50", cex.mean = 1, cex.obs = 1, cex.r = 1.5, ...)
+{
+  x <- time.serie
+  y <- time.serie.max
+  z <- time.serie.min
+  obs <- time.serie.obs
+
+  n.years <- length(x)
+  years.serie <- year.start:year.end
+  # m is the value used to separate positive anomalies from negative ones, i.e.: the climatology over the whole period
+  m <- mean(x, na.rm=TRUE)
+
+  bar1 <- z # white bar
+  bar2 <- pmin(x, m) - bar1 # gray bar
+  bar3 <- pmax(m - x, 0) # blue bar
+  bar4 <- pmax(x - m, 0) # red bar
+  bar5 <- y - (bar1+bar2+bar3+bar4) # gray bar
+
+  bar1[is.na(bar1)] <- m # in case there are NAs
+  bar2[is.na(bar2)] <- 0 # in case there are NAs
+  bar3[is.na(bar3)] <- 0 # in case there are NAs
+  bar4[is.na(bar4)] <- 0 # in case there are NAs
+  bar5[is.na(bar5)] <- 0 # in case there are NAs
+
+  #bar3 <- pmax(x - m, 0)
+  #bar2 <- pmax(m - x, 0)
+  #bar1 <- m - bar2
+
+  bar.matrix <- matrix(c(bar1,bar2,bar3,bar4,bar5), 5, length(years.serie), byrow=T)
+  my.bar <- barplot(bar.matrix, col = col.bar, border = NA, names.arg = years.serie, ylim = c(freq.min,freq.max), axis.lty = 1, space = 0.2, main = title, cex.axis=cex.y, cex.names=cex.x, xlab="", ylab=ylab, mgp=mgp)
+
+  abline(m,0, col=col.line) # add a horizontal line at the climatological mean
+
+  # add points with the ensemble mean:
+  #col.sim <- rep("red",year.end-year.start+1)
+  #col.sim[which(x < m)] <- "blue"
+  #points(my.bar, x, type="p", pch=20, xlab="", ylab="", col=col.sim, cex=cex.mean)
+
+  # add crosses with the observed values:
+  col.obs <- rep("#67001f",year.end-year.start+1)
+  col.obs[which(obs < m)] <- "#053061"
+  points(my.bar, obs, type="p", pch=4, xlab="", ylab="", col=col.obs, cex=cex.obs, lwd=5)
+
+  # add the correlation between the observed time series and the ensemble mean time series:
+  #corr <- round(cor(x,obs, use="complete.obs"),2)
+  #text(length(years.serie), freq.max*(29/30), labels=paste0("r= ",corr), cex=cex.r)
+
+  # add the mean frequency of the simulated ensemble mean time series:
+  #text(length(years.serie)/3, freq.max*(29/30),labels=bquote(bar(nu) == .(paste0(round(mean(x),1),"%"))),cex=cex.r)
+
+  #z <- lm(x ~ years)
+  kendall_pvalue <- MannKendall(x)$sl[1]
+
+  # add a line with the linear trend, but only if it is significant:
+  #if(summary(z)$coefficients[2,4] < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+  if(kendall_pvalue < p.value) abline(lsfit(1:n.years,x), lty=1, col="gray10")
+}
+
+
+# As ColorBar, but the user MUST supply their own ticks and tick labels
+# (it is used to have better control over the values shown):
+# the option draw.ticks is used to remove the tick lines;
+# the option label.dist is used to change the distance from the legend to the labels.
+# Example:
+#
+# ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+#
+ColorBar2 <- function (brks, cols = NULL, vert = TRUE, cex = 1, draw.ticks = TRUE, label.dist = 1, my.ticks, my.labels)
+{
+    if (is.null(cols) == TRUE) {
+        nlev <- length(brks) - 1
+        cols <- rainbow(nlev)
+    }
+    else {
+        if (length(cols) != (length(brks) - 1)) {
+            stop("Inconsistent colour levels / list of colours")
+        }
+    }
+    if (vert) {
+        par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1, 0), las = 1, cex = 1.2)
+        image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE,
+            col = cols, xlab = "", ylab = "")
+        box()
+        axis(4, at = my.ticks, tick = draw.ticks, labels = my.labels, cex.axis = cex, mgp=c(3,label.dist,0))
+    }
+    else {
+        par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3, 0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2)
+        image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE,
+            col = cols, xlab = "", ylab = "")
+        box()
+        axis(1, at = my.ticks, tick = draw.ticks, labels = my.labels, cex.axis = cex, mgp=c(3,label.dist,0))
+    }
+}
+
+# ColorBar3: like ColorBar, but the user can specify a subset of the predefined labels to be shown in the legend (more powerful than the resample option)
+ColorBar3 <- function (brks, cols = NULL, vert = TRUE, cex = 1, subset = 1:length(brks))
+{
+    if (is.null(cols) == TRUE) {
+        nlev <- length(brks) - 1
+        cols <- rainbow(nlev)
+    }
+    else {
+        if (length(cols) != (length(brks) - 1)) {
+            stop("Inconsistent colour levels / list of colours")
+        }
+    }
+    if (vert) {
+        par(mar = c(1, 1, 1, 1.5 * (1 + cex)), mgp = c(1, 1, 0), las = 1, cex = 1.2)
+        image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE,
+            col = cols, xlab = "", ylab = "")
+        box()
+        axis(4, at = seq(0.5, length(brks) - 0.5, 1)[subset],
+            tick = TRUE, labels = brks[seq(1, length(brks), 1)][subset],
+            cex.axis = cex)
+    }
+    else {
+        par(mar = c(0.5 + cex, 1, 1, 1), mgp = c(1.5, max(c(0.3, 0.8 * (cex - 0.625))), 0), las = 1, cex = 1.2)
+        image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE,
+            col = cols, xlab = "", ylab = "")
+        box()
+        axis(1, at = seq(0.5, length(brks) - 0.5, 1)[subset],
+            labels = brks[seq(1, length(brks), 1)][subset],
+            cex.axis = cex)
+    }
+}
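+
+# A minimal usage sketch of ColorBar3() (not part of the original script): draw a horizontal legend
+# showing only every second break label:
+#
+# my.brks <- seq(-1, 1, 0.25)
+# my.cols <- rainbow(length(my.brks) - 1)
+# ColorBar3(my.brks, my.cols, vert = FALSE, subset = seq(1, length(my.brks), 2))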
+
+
+# Hand the function below a matrix of numbers: it plots the matrix with a color scale based on the highest and lowest values in the matrix.
+# usage: myImagePlot(m), where m is a matrix of numbers
+# optional arguments: myImagePlot(m, xLabels, yLabels, zlim, title=c("my title"))
+# xLabels and yLabels are vectors of strings to label the rows and columns.
+# zlim is a vector containing the low and high values to use for the color scale.
+myImagePlot <- function(x, ...){
+  min <- min(x)
+  max <- max(x)
+  yLabels <- rownames(x)
+  xLabels <- colnames(x)
+  title <- c()
+  # check for additional function arguments:
+  if( length(list(...)) ){
+    Lst <- list(...)
+    if( !is.null(Lst$zlim) ){
+      min <- Lst$zlim[1]
+      max <- Lst$zlim[2]
+    }
+    if( !is.null(Lst$yLabels) ){
+      yLabels <- c(Lst$yLabels)
+    }
+    if( !is.null(Lst$xLabels) ){
+      xLabels <- c(Lst$xLabels)
+    }
+    if( !is.null(Lst$title) ){
+      title <- Lst$title
+    }
+  }
+  # check for null values:
+  if( is.null(xLabels) ){
+    xLabels <- c(1:ncol(x))
+  }
+  if( is.null(yLabels) ){
+    yLabels <- c(1:nrow(x))
+  }
+
+  layout(matrix(data=c(1,2), nrow=1, ncol=2), widths=c(4,1), heights=c(1,1))
+
+  # Red and green range from 0 to 1, while blue ranges from 1 to 0:
+  ColorRamp <- rgb( seq(0,1,length=256),  # Red
+                    seq(0,1,length=256),  # Green
+                    seq(1,0,length=256))  # Blue
+  ColorLevels <- seq(min, max, length=length(ColorRamp))
+
+  # Reverse the Y axis:
+  reverse <- nrow(x) : 1
+  yLabels <- yLabels[reverse]
+  x <- x[reverse,]
+
+  # Data map:
+  par(mar = c(3,5,2.5,2))
+  image(1:length(xLabels), 1:length(yLabels), t(x), col=ColorRamp, xlab="",
+        ylab="", axes=FALSE, zlim=c(min,max))
+  if( !is.null(title) ){
+    title(main=title)
+  }
+  axis(BELOW<-1, at=1:length(xLabels), labels=xLabels, cex.axis=0.7)
+  axis(LEFT<-2, at=1:length(yLabels), labels=yLabels, las=HORIZONTAL<-1,
+       cex.axis=0.7)
+
+  # Color scale:
+  par(mar = c(3,2.5,2.5,2))
+  image(1, ColorLevels,
+        matrix(data=ColorLevels, ncol=length(ColorLevels), nrow=1),
+        col=ColorRamp,
+        xlab="", ylab="",
+        xaxt="n")
+
+  layout(1)
+}
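+
+# A minimal usage sketch of myImagePlot() (not part of the original script):
+#
+# m <- matrix(rnorm(30), nrow = 5, dimnames = list(paste0("r", 1:5), paste0("c", 1:6)))
+# myImagePlot(m, title = c("toy matrix"), zlim = c(-3, 3))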
+
+################################################################################
+#                              Big data functions                              #
+################################################################################
+
+################################# split.array #################################
+
+# function to split an array into smaller arrays, selecting one dimension of the array as the one used for splitting.
+# The function only returns the intervals of each subarray, not the values of the subarrays.
+# The subarrays are then used inside a for loop in the main script to do all the required analysis, instead of applying it to the whole array.
+#
+# The array can be, for instance, a hindcast array with dimensions: c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon)
+#
+# Example:
+#
+# hindcasts <- array(rnorm(4*100*4*256*512), c(4,100,4,256,512))
+# block <- split.array.old(hindcasts, 5) # split on longitude
+#
+# for(subArray in 1:block$n.sub){ # loop on each smaller subarray
+#   subLon=block$my.interv[[subArray]] # longitude interval corresponding to the subarray data
+#   nlon<-length(subLon) # length of the actual subarray
+#
+#   hindcast.sub <- hindcasts[,,,,subLon] # subarray data
+#   # now insert below any calculation you need to do on the hindcast.sub array
+#
+# } # close for on subArray
+
+split.array.old <- function (array, along=tail(dim(array),1), max.n.el=10000000){
+  array.dims <- dim(array) # i.e.: [4,100,4,320,640]
+  n.sub <- prod(array.dims)/max.n.el # number of sub-arrays into which to split the hindcast and rean.data, i.e.: 4*100*4*320*640/10000000 = 32.768
+  n.sub <- ceiling(n.sub) # round n.sub to the nearest higher integer, i.e.: 33
+  n.split <- array.dims[along] # number of elements along the splitting dimension, i.e.: 640 if along=5
+  sub.size <- floor(n.split/n.sub) # number of elements in the split dimension (i.e.: lon) of each subarray, rounded to the lower integer, i.e.: 640/33=19.39=19
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  n.sub <- n.split %/% sub.size # take only the integer part of the ratio. It is equal to floor(n.split/sub.size), i.e.: 640/19=33.68=33
+  add.last <- n.split %% n.sub # number of additional elements of the last subarray (if %% > 0), i.e.: 640 %% 33 = 13
+
+  my.interv<-list()
+  for(s in 1:n.sub){
+    if(s==n.sub) {mod.last <- add.last} else {mod.last <- 0}
+    my.interv[[s]] <- (1 + sub.size*(s-1)):((sub.size*s) + mod.last)
+  }
+
+  return(list(array.dims=array.dims, along=along, n.split=n.split, n.sub=n.sub, sub.size=sub.size, add.last=add.last, sub.size.last=sub.size + add.last, my.interv=my.interv))
+}
+
+# Function to split an array into smaller arrays (chunks)
+#
+# in the argument 'dimensions', you specify the dimensions of the array to split [i.e.: c(4,100,4,256,512) for a hindcast array]
+# in the argument 'along', you specify which dimension you want to use to split the array. By default, it is the last dimension of the array.
+# in the argument 'max.n.el', you set the size of a chunk. By default, split.array creates chunks of about 10,000,000 elements. For arrays of type numeric (double),
+#   this is equivalent to 80 MB, because each double occupies 8 bytes.
+# in the argument 'chunks', you specify exactly how many chunks you want to be created. It overrides the option 'max.n.el'. Useful if you already know
+#   (more or less) how many chunks you need and want to run each chunk on a different core at the same time, so you can't exceed the maximum number of cores available on your machine.
+# in the argument 'smallest', you specify if you want to run the script with the maximum possible number of chunks (smallest=TRUE),
+#   so that the number of chunks is equal to the number of values of the along dimension and the size of each chunk is exactly 1.
+
+split.array <- function (dimensions, along=tail(dimensions,1), max.n.el=10000000, chunks=NULL, smallest=FALSE){
+  if(along <= 0 || along > length(dimensions)) stop("Choose a number for the 'along' argument inside the number of available dimensions!")
+
+  array.dims <- dimensions # i.e.: [4,100,4,320,640]
+  n.split <- array.dims[along] # number of elements along the splitting dimension, i.e.: 640 if along=5
+
+  n.chunk <- prod(array.dims)/max.n.el # first estimate of the number of chunks (sub-arrays) into which the array will be split, i.e.: 4*100*4*320*640/10000000 = 32.768
+  n.chunk <- ceiling(n.chunk) # round n.chunk to the nearest higher integer, i.e.: 33
+  chunk.size <- floor(n.split/n.chunk) # number of elements in the split dimension (i.e.: lon) of each chunk, rounded to the lower integer, i.e.: 640/33=19.39=19
+  if(chunk.size <= 1) stop("Chunks too small. Try increasing 10 times the value of max.n.el")
+
+  n.chunk <- n.split %/% chunk.size # take only the integer part of the ratio. It is equal to floor(n.split/chunk.size), i.e.: 640/19=33.68=33
+  if(!is.null(chunks)) {n.chunk <- chunks; chunk.size <- floor(n.split/n.chunk)}
+
+  add.last <- n.split %% n.chunk # number of additional elements of the last chunk (if %% > 0), i.e.: 640 %% 33 = 13
+
+  if(smallest == TRUE){ # in this case, we want each chunk to be exactly 1 row or column wide:
+    n.chunk <- array.dims[along]
+    n.split <- n.chunk
+    chunk.size <- 1
+    add.last <- 0
+  }
+
+  # list with the position of each chunk c inside the chosen dimension of the array:
+  int<-list()
+  for(c in 1:n.chunk){
+    if(c == n.chunk) {mod.last <- add.last} else {mod.last <- 0}
+    int[[c]] <- (1 + chunk.size*(c-1)):((chunk.size*c) + mod.last)
+  }
+
+  chunk.size.last <- chunk.size + add.last # number of elements of the last chunk
+
+  # list with the number of elements of each chunk c:
+  n.int<-list()
+  for(c in 1:n.chunk) n.int[[c]] <- length(int[[c]])
+
+  return(list(n.int=n.int, int=int, array.dims=array.dims, along=along, max.n.el=max.n.el, n.split=n.split,
+    n.chunk=n.chunk, chunk.size=chunk.size, add.last=add.last, chunk.size.last=chunk.size.last))
+}
+
+
+# Example:
+#
+# hindcasts <- array(rnorm(4*100*4*256*512), c(4,100,4,256,512))
+#
+# chunk <- split.array(dim(hindcasts), 5) # split on longitude
+#
+# for(subArray in 1:chunk$n.chunk){ # loop on each smaller subarray
+#   subLon=chunk$int[[subArray]] # longitude interval corresponding to the subarray data
+#   nlon<-length(subLon) # length of the actual subarray
+#
+#   hindcast.sub <- hindcasts[,,,,subLon] # subarray data
+#   # now insert below any calculation you need to do on the hindcast.sub array
+#
+# } # close for on subArray
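+
+# A minimal check of the chunk list returned by split.array() (not part of the original script):
+# verify that the chunks cover the whole longitude dimension without gaps or overlaps:
+#
+# chunk <- split.array(c(4, 100, 4, 256, 512), along = 5)
+# all(sort(unlist(chunk$int)) == 1:512) # TRUE: every longitude belongs to exactly one chunk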
+
+
+#################### veriApplyBig ############################################
+
+# A wrapper of veriApply() able to work even with hindcast arrays that exceed the memory limits of the workstation,
+# and to efficiently use the option 'parallel=TRUE' also for large data arrays, with no memory limits
+# (only 2-5 GB of RAM are necessary, depending on the number of cpus used).
+#
+# It splits the input hindcast data and the input observed data into smaller arrays based on the longitude values,
+# and then applies veriApply() to each sub-array, assembling the results in an array with the same format as the array returned by veriApply().
+# A progress bar shows how many sub-arrays have already been processed.
+#
+# To take advantage of this function, the option parallel=TRUE is enabled by default,
+# and the option 'ncpus' is set to 8, to work with BSC workstations with 4 physical cores (8 with hyperthreading).
+# Users in possession of IC3 Intel Xeon workstations with 8 physical cores should set ncpus = 16 to take advantage of hyperthreading.
+# With ncpus=8, calculations are 4 times faster, while with ncpus=16, calculations are 8 times faster.
+#
+# On Moore, you can set ncpus=8, and on Amdahl, ncpus can be set to a maximum of 12; however, it is not recommended to run this function at full power on Moore or Amdahl,
+# since it would consume all the available cores (8 for Moore and 12 for Amdahl), preventing other users from employing the cluster for their single-core calculations.
+# You can run it during the weekend if there are no other jobs scheduled, or set a lower number of ncpus (4-5) to leave resources for other users, even if performance will suffer.
+#
+# Parallel computation employs all the CPU resources of the machine: it will go slower until the computation is finished.
+# If you want to use the machine also for other tasks, you can set a lower number of ncpus (2 or 3), with a loss of performance.
+#
+# If you get a memory problem error, try decreasing the value of the variable 'max.n.el'.
+#
+# Example of use:
+#
+# library(ff)
+# bigfile <- "/scratch/Earth/ncortesi/bigfile" # choose a site where to store the big data array
+# source('/scratch/Earth/ncortesi/RESILIENCE/veriApplyBig.R') # load the veriApplyBig() and save.big() functions
+#
+# # create a random hindcast that normally wouldn't fit into memory:
+# anom.hind.dim<-c(51,30,1,256,512)
+# anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)
+#
+# save.big(array=anom.hind, path=bigfile)
+#
+# # create a random observed hindcast that fits into memory (because observations only have 1 member):
+# anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])
+#
+# ffload(file=bigfile)
+# # str(ff.array)
+# open(ff.array)
+#
+# my.score <- veriApplyBig("FairRpss",fcst=ff.array, obs=anom.rean, tdim=2, ensdim=1 , prob=c(1/3, 1/3)) # by default parallel=TRUE and with ncpus=8 to run it on our pc
+#
+# close(ff.array)
+
+veriApplyBig <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) - 1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+    na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=5000000, path=NULL, ...)
+{
+  sub <- prod(dim(fcst))/max.n.el
+  sub <- ceiling(sub) # number of sub-arrays into which to split the hindcast and rean.data
+  n.lon <- tail(dim(fcst),1) # number of longitude elements
+  sub.size <- floor(n.lon/sub) # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(fcst),3)) # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # longitude interval where to load data
+
+    anom.hindcast.sub <- fcst[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  cat('\n')
+  return(my.SkillScore)
+}
+
+old_veriApplyBig <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) - 1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+    na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=10000000, path=NULL, ...)
+{
+  ffload(file=fcst)
+  #str(ff.array)
+  open(ff.array)
+
+  sub <- prod(dim(ff.array))/max.n.el
+  sub <- ceiling(sub) # number of sub-arrays into which to split the hindcast and rean.data
+  n.lon <- tail(dim(ff.array),1) # number of longitude elements
+  sub.size <- floor(n.lon/sub) # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(ff.array),3)) # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # longitude interval where to load data
+
+    anom.hindcast.sub <- ff.array[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  close(ff.array)
+
+  cat('\n')
+  return(my.SkillScore)
+}
+
+
+################################################################################
+#                                   save.big                                   #
+################################################################################
+
+# A wrapper of ffsave() to save on disk big arrays of a numeric (double) variable
+# in the .ffData format (by means of the ff package). See veriApplyBig() for an example.
+save.big <- function(array, path) {
+  ff.array <- as.ff(array, vmode="double", file = path)
+  ffsave(ff.array, file = path)
+  close(ff.array); rm(ff.array)
+}
+
+
+################################################################################
+#                                 veriApplyPar                                 #
+################################################################################
+
+# A wrapper of veriApply() to efficiently use the option 'parallel=TRUE' also for large data arrays, with no memory limits;
+# however, the input hindcast array must fit into memory to use this function.
+#
+# The function splits the input hindcast data and the input observed data into smaller arrays based on the longitude values,
+# and then applies veriApply() to each sub-array, assembling the results in an array with the same format as the array returned by veriApply().
+# A progress bar shows how many sub-arrays have already been processed.
+#
+# To take advantage of this function, the option parallel=TRUE is enabled by default,
+# and the option 'ncpus' is set to 8, to work with BSC workstations with 4 physical cores (8 with hyperthreading).
+# Users in possession of IC3 Intel Xeon workstations with 8 physical cores should set ncpus = 16 to take advantage of hyperthreading.
+# With ncpus=8, calculations are 4 times faster, while with ncpus=16, calculations are 8 times faster.
+#
+# On Moore, you can set ncpus=8, and on Amdahl, ncpus can be set to a maximum of 12; however, it is not recommended to run this function at full power on Moore or Amdahl,
+# since it would consume all the available cores (8 for Moore and 12 for Amdahl), preventing other users from employing the cluster for their single-core calculations.
+# You can run it during the weekend if there are no other jobs scheduled, or set a lower number of ncpus (4-5) to leave resources for other users, even if performance will suffer.
+#
+# Parallel computation employs all the CPU resources of the machine: it is not possible to use it for other tasks until the computation is finished.
+# If you want to use the machine also for other tasks, you can set a lower number of ncpus (2 or 3), with a loss of performance.
+#
+# If you get a memory problem error, try decreasing the value of the variable 'max.n.el'.
+#
+# Examples of use:
+#
+# my.score <- veriApplyPar("FairRpss",fcst=anom.hindcast, obs=anom.rean, tdim=2, ensdim=1 , prob=c(1/3, 1/3)) # by default parallel=TRUE and with ncpus=8 to run it on our pc
+#
+# my.score <- veriApplyPar("FairCrpss",fcst=anom.hindcast, obs=anom.rean, tdim=2, ensdim=1 , ncpus=2) # to set a lower number of cpus
+
+veriApplyPar <- function (verifun, fcst, obs, fcst.ref = NULL, tdim = length(dim(fcst)) - 1, ensdim = length(dim(fcst)), prob = NULL, threshold = NULL,
+    na.rm = FALSE, parallel = TRUE, ncpus = 8, max.n.el=10000000, ...)
+{
+  sub <- prod(dim(fcst))/max.n.el
+  sub <- ceiling(sub) # number of sub-arrays into which to split the hindcast and rean.data
+  n.lon <- tail(dim(fcst),1) # number of longitude elements
+  sub.size <- floor(n.lon/sub) # number of elements in the last dimension (lon) of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.lon %% sub # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+
+  my.SkillScore<-array(NA,tail(dim(fcst),3)) # take only the leadtime, lat and lon dimensions
+
+  cat('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # longitude interval where to load data
+
+    anom.hindcast.sub <- fcst[,,,,my.interv]
+    anom.rean.sub <- obs[,,,my.interv]
+
+    my.SkillScore.sub <- veriApply(verifun, fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=tdim, ensdim=ensdim, prob=prob, threshold=threshold, na.rm=na.rm, parallel=parallel, ncpus=ncpus)[[1]]
+
+    my.SkillScore[,,my.interv]<-my.SkillScore.sub
+
+    gc()
+  }
+
+  cat('\n')
+  return(my.SkillScore)
+}
+
+
+#######################################################################################
+#                                  parApplyCal                                        #
+#######################################################################################
+
+# like parApply, but with a check to authorize the parallel computation or not.
+# Like parApply, it must be applied only to arrays small enough to fit in memory.
+# If your array is too big, consider splitting it into smaller arrays with
+# the function split.array.
+parApplyCal <- function(cl = NULL, X, MARGIN, FUN, ncpus=4, ...) {
+  hasparallel <- FALSE
+  .cl <- try(parallel::makeCluster(ncpus, type = "FORK"), silent = TRUE)
+  if (!"try-error" %in% class(.cl)) hasparallel <- TRUE
+
+  if (hasparallel) {
+    on.exit(parallel::stopCluster(.cl))
+    output <- parallel::parApply(cl = .cl, X = X, MARGIN = MARGIN, FUN = FUN, ...)
+  } else {
+    output <- apply(X = X, MARGIN = MARGIN, FUN = FUN, ...)
+  }
+
+  return(output)
+}
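+
+# A minimal usage sketch of parApplyCal() (not part of the original script): compute the mean over
+# the first dimension of a small array, on 2 cores if a FORK cluster can be created (i.e. on
+# Linux/Mac), falling back to a serial apply() otherwise:
+#
+# a <- array(rnorm(10 * 50 * 60), c(10, 50, 60))
+# clim <- parApplyCal(X = a, MARGIN = c(2, 3), FUN = mean, ncpus = 2)
+# dim(clim) # [50, 60]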
+
+#######################################################################################
+#                                old_parApplyBig                                      #
+#######################################################################################
+
+# parallel::parApply() needs too much memory when the input array is too big (>400-500 MB on 8 GB machines).
+# splitdim is the dimension that will be split internally; it must be one of the dimensions also used by MARGIN (see the examples above)
+old_parApplyBig <- function(cl = NULL, X, MARGIN, FUN, splitdim = tail(dim(X),1), max.n.el=10000000, ncpus=4, ...) {
+  sub <- prod(dim(X))/max.n.el
+  sub <- ceiling(sub) # number of sub-arrays into which to split the hindcast and rean.data
+  n.el <- dim(X)[splitdim] # number of elements in the splitdim dimension
+  sub.size <- floor(n.el/sub) # number of elements in the splitting dimension of each subarray
+  if(sub.size<=1) stop("Subarrays too small. Increase ten times the value of max.n.el")
+  last.sub.size<-n.el %% sub # number of additional elements of the last subarray
+  if(last.sub.size>0) sub<-sub+1
+  n.dim.X <- length(dim(X)) # number of dimensions of array X
+
+  # swap the splitdim dimension with the last one:
+  my.seq <- 1:n.dim.X
+  my.seq[n.dim.X] <- splitdim
+  my.seq[splitdim] <- n.dim.X
+  if(splitdim < n.dim.X) X <- aperm(X, my.seq)
+  if(splitdim < n.dim.X && length(which(MARGIN==n.dim.X))==0) MARGIN[splitdim] <- n.dim.X
+
+  output<-array(NA,dim(X)[MARGIN]) # take only the dimensions used by MARGIN
+
+  print('Subarray n. ')
+
+  for(s in 1:sub){
+    cat(paste0(s,'/',sub,' '))
+
+    if(s==sub && last.sub.size>0) {last<-sub.size-last.sub.size} else {last<-0} # the last subarray is shorter than the others, if last.sub.size>0
+    my.interv <- (1+sub.size*(s-1)):((sub.size*s)-last) # longitude interval where to load data
+
+    if(n.dim.X == 2) subarray <- X[,my.interv]
+    if(n.dim.X == 3) subarray <- X[,,my.interv]
+    if(n.dim.X == 4) subarray <- X[,,,my.interv]
+    if(n.dim.X == 5) subarray <- X[,,,,my.interv]
+    if(n.dim.X == 6) subarray <- X[,,,,,my.interv]
+    if(n.dim.X == 7) subarray <- X[,,,,,,my.interv]
+    if(n.dim.X == 8) subarray <- X[,,,,,,,my.interv]
+    if(n.dim.X == 9) subarray <- X[,,,,,,,,my.interv]
+    if(n.dim.X == 10) subarray <- X[,,,,,,,,,my.interv]
+    if(n.dim.X > 10) stop("input array has too many dimensions")
+
+    hasparallel <- FALSE
+    .cl <- try(parallel::makeCluster(ncpus, type = "FORK"), silent = TRUE)
+    if (!"try-error" %in% class(.cl)) hasparallel <- TRUE
+
+    if (hasparallel) {
+      on.exit(parallel::stopCluster(.cl))
+      suboutput <- parallel::parApply(cl = .cl, X = subarray, MARGIN = MARGIN, FUN = FUN)
+    }
+    else {
+      suboutput <- apply(X = subarray, MARGIN = MARGIN, FUN = FUN, ...)
+    }
+
+    if(length(MARGIN) == 2) output[,my.interv] <- suboutput
+    if(length(MARGIN) == 3) output[,,my.interv] <- suboutput
+    if(length(MARGIN) == 4) output[,,,my.interv] <- suboutput
+    if(length(MARGIN) == 5) output[,,,,my.interv] <- suboutput
+    if(length(MARGIN) == 6) output[,,,,,my.interv] <- suboutput
+
+    if(splitdim < n.dim.X) X <- aperm(X, my.seq) ## FIXME: this re-swaps X every iteration; the original order should be restored only once, after the loop
+
+    gc()
+  }
+
+  cat('\n')
+  return(output)
+}
+
+
+#################################################################################################
+#                                         Wine indexes                                          #
+#################################################################################################
+
+# 1. Annual mean temperature
+# temp must have the format [month,lat,lon] or [month,lon,lat], and num.months must be a multiple of 12;
+# it can be smaller than the number of months in temp: it defines the number of months used to calculate the index, starting from the first month in the temp array.
+# It must be a multiple of 12 to reflect whole years; i.e.: num.months=24 considers the first 24 months of the temp array for the average.
+index1<-function(temp,num.months){
+  my.temp<-temp[1:num.months,,] # select only the months actually used
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # if subsetting dropped the array dimensions, convert back to an array before using apply
+  my.index<-apply(my.temp,c(2,3),mean) # calculate the mean temperature
+  return(my.index)
+}
+
+# 2. Mean temperature of the growing season, for the WHOLE period
+# (average of the monthly mean temperatures for the October-April period)
+# you must provide at least 24 months of data starting from January to compute the index,
+# because the first 4 months and the last 3 cannot be used for the calculation.
+# if you want to calculate the yearly value for year XXXX, just introduce its 12 months plus the 12 months of the following year.
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index2<-function(temp,num.months){
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)] # remove the first four months, because they miss the Oct-Dec months
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # remove the last three months, because they miss the Jan-Apr months
+
+  temp.oct.apr<-temp[my.months,,] # select only the months from October to April
+  if(!is.array(temp.oct.apr)) temp.oct.apr<-array(temp.oct.apr,c(length(temp.oct.apr),1,1)) # if subsetting dropped the array dimensions, convert back to an array
+
+  my.index<-apply(temp.oct.apr,c(2,3),mean) # calculate the mean temperature
+  return(my.index)
+}
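+
+# A minimal usage sketch of index1() and index2() (not part of the original script): two years of
+# synthetic monthly temperatures on a 3x4 grid; index2() needs at least 24 months:
+#
+# temp <- array(rnorm(24 * 3 * 4, mean = 15, sd = 5), c(24, 3, 4)) # [month, lat, lon]
+# dim(index1(temp, 24)) # [3, 4]: mean temperature over the 24 months
+# dim(index2(temp, 24)) # [3, 4]: mean October-April temperature (one growing season)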
+
+# 3. Winkler index for the vegetative period
+# temp data must start from January; num.months must be at least 24 months and a multiple of 12 months.
+# temp must have the format [month,lat,lon] or [month,lon,lat]
+index3<-function(temp,num.months){
+  n.years=num.months/12
+
+  temp.minus.ten<-temp-10 # subtract 10 degrees from each month
+  ss<-which(temp.minus.ten<0,arr.ind=T) # select elements < 0 degrees
+  temp.minus.ten[ss]=0 # set to 0 the elements lower than 0 degrees
+
+  month31=c(1,3,5,7,8,10,12) # months with 31 days
+  my.months31<-rep(month31,n.years)+rep(0:(n.years-1)*12,each=length(month31)) # select only the months with 31 days
+  temp.minus.ten[my.months31,,]<-temp.minus.ten[my.months31,,]*31 # multiply the temperature of these months by 31
+
+  month30=c(4,6,9,11) # months with 30 days
+  my.months30<-rep(month30,n.years)+rep(0:(n.years-1)*12,each=length(month30)) # select only the months with 30 days
+  temp.minus.ten[my.months30,,]<-temp.minus.ten[my.months30,,]*30 # multiply the temperature of these months by 30
+
+  month28=2 # months with 28 days
+  my.months28<-rep(month28,n.years)+rep(0:(n.years-1)*12,each=length(month28)) # select only the months with 28 days
+  temp.minus.ten[my.months28,,]<-temp.minus.ten[my.months28,,]*28.25 # multiply the temperature of these months by 28.25
+
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)] # remove the first four months, because they miss the Oct-Dec months
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # remove the last three months, because they miss the Jan-Apr months
+
+  temp.oct.apr<-temp.minus.ten[my.months,,] # select only the months from October to April
+  if(!is.array(temp.oct.apr)) temp.oct.apr<-array(temp.oct.apr,c(length(temp.oct.apr),1,1)) # if subsetting dropped the array dimensions, convert back to an array before using apply
+  my.index<-apply(temp.oct.apr,c(2,3),sum) # calculate the sum of the (approximately daily) degrees above 10 degrees
+  return(my.index/(n.years-1)) # normalize by the number of years used, taking into account that one vegetative period is always lost
+}
+
+# 4. Winter Severity Index (old version: the absolute minimum of the mean temperature)
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index4_old<-function(temp,num.months){
+  my.temp<-temp[1:num.months,,] # select only the months actually used
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # if subsetting dropped the array dimensions, convert back to an array before using apply
+  my.index<-apply(my.temp,c(2,3),min)
+  return(my.index)
+}
+
+# 4. Winter Severity Index (mean temperature of the coldest month)
+# temp must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index4<-function(temp,num.months){
+  n.years=num.months/12
+
+  month.coldest=7 # coldest month in the southern hemisphere
+  my.months.coldest<-rep(month.coldest,n.years)+rep(0:(n.years-1)*12,each=1) # select the coldest month of each year
+  my.temp<-temp[1:num.months,,] # select only the months in the chosen period
+
+  if(!is.array(my.temp)) my.temp<-array(my.temp,c(length(my.temp),1,1)) # if subsetting dropped the array dimensions, convert back to an array before using apply
+  my.temp<-my.temp[my.months.coldest,,] # select only the coldest months
+  if(is.null(dim(my.temp))) {my.index<-my.temp} else {my.index<-apply(my.temp,c(2,3),mean)}
+  return(my.index)
+}
+
+# 5. Annual precipitation
+# prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+index5<-function(prec,num.months){
+  my.prec<-prec[1:num.months,,] # select only the months actually used
+  if(!is.array(my.prec)) my.prec<-array(my.prec,c(length(my.prec),1,1)) # if subsetting dropped the array dimensions, convert back to an array before using apply
+  my.index<-apply(my.prec,c(2,3),sum)
+  n.years=num.months/12
+  return(my.index/n.years) # normalize by the number of years to return the mean annual value of the index
+}
+
+# 6. Precipitation during the vegetative cycle (October to April)
+# prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12 and at least 24 months
+index6<-function(prec,num.months){
+  my.seq1<-rep(12*seq(0,num.months/12-1),each=7)
+  my.seq2<-rep(c(1:4,10:12),num.months/12)
+  my.months<-my.seq1 + my.seq2 # all the months inside num.months from October to April
+
+  my.months<-my.months[-(1:4)] # remove the first four months, because they belong to the previous growing season
+  n.months<-length(my.months)
+  my.months<-my.months[-((n.months-2):n.months)] # remove the last three months of the last year
+
+  prec.oct.apr<-prec[my.months,,] # select only the months from October to April
+  if(!is.array(prec.oct.apr)) prec.oct.apr<-array(prec.oct.apr,c(length(prec.oct.apr),1,1)) # if subsetting dropped the array dimensions, convert back to an array before using apply
+  my.index<-apply(prec.oct.apr,c(2,3),sum) # calculate the total precipitation
+  n.years=num.months/12
+
+  return(my.index/(n.years-1)) # normalize by the number of vegetative periods provided, to return the mean annual value of the index
+}
+
+# function to select one of the above six indexes:
+# temp and prec must have the format [month,lat,lon] or [month,lon,lat]; num.months must be a multiple of 12
+choose.index<-function(num.index,temp,prec,num.months){
+  if(num.index==1) return(index1(temp,num.months))
+  if(num.index==2) return(index2(temp,num.months))
+  if(num.index==3) return(index3(temp,num.months))
+  if(num.index==4) return(index4(temp,num.months))
+  if(num.index==5) return(index5(prec,num.months))
+  if(num.index==6) return(index6(prec,num.months))
+}
+
+# same function as above, but returns all six indexes:
+indices<-function(temp,prec,num.months){
+  return(list(TempMediaAnual=index1(temp,num.months),
+    TempMediaVeget=index2(temp,num.months),
+    IndiceWinklerVeg=index3(temp,num.months),
+    WinterSeverityIndex=index4(temp,num.months),
+    PrecAnual=index5(prec,num.months),
+    PrecVeget=index6(prec,num.months)))
+}
+
+
+#################################################################################################
+#                                         Error indexes                                         #
+#################################################################################################
+
+# simple function to compute the root mean square error, given the vector of observed values and the vector of predictions (the obs vector may also contain NAs).
+# pred always contains all the annual predicted values, while some years may lack observed values, so only the rows corresponding to years with NA among the obs values must be removed:
+RMSE<-function(obs,pred){
+  years.right<-which(is.na(obs)==FALSE)
+  obs2<-obs[years.right]
+  pred2<-pred[years.right]
+  sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE) # sum of the squared errors
+  RMSE<-(sum.scarti.quad/(length(years.right)))^0.5 # years with NA must also be excluded from the denominator!!!
+  return(RMSE)
+}
+AGREE<-function(obs,pred){ # computes Willmott's d (Index of Agreement); BEWARE: it is nearly as insensitive to over/underestimation as R2
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+ obs2.mean<-mean(obs2)
+ d<-1-(sum.scarti.quad/(sum((abs(pred2-obs2.mean)+abs(obs2-obs2.mean))^2)))
+ return(d)
+}
+
+AGREE.1<-function(obs,pred){ # modified Willmott's d, computed without the squares
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+ obs2.mean<-mean(obs2)
+ d1<-1-(sum.scarti/(sum(abs(pred2-obs2.mean)+abs(obs2-obs2.mean))))
+ return(d1)
+}
+
+AGREE.2011<-function(obs,pred){ # new Willmott's d, introduced by Willmott in 2011
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti<-sum(abs(obs2-pred2),na.rm=TRUE)
+ obs2.mean<-mean(obs2)
+ denom<-2*sum(abs(obs2-obs2.mean)) # denominator of the d(r) formula
+ if(sum.scarti<=denom){d.2011<-1-(sum.scarti/denom)}else{d.2011<-(denom/sum.scarti)-1}
+ return(d.2011)
+}
+
+RMSE.freedom<-function(obs,pred,degree.freedom){ # function to compute the root mean square error with a user-supplied number of degrees of freedom in the denominator (the obs vector may also contain NAs)
+ # pred always holds all the annual predicted values, but some years may lack observations, so we remove the rows corresponding to the years with NA among the obs values:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+ RMSE<-(sum.scarti.quad/degree.freedom)^0.5
+ return(RMSE)
+}
+
+SumSquared<-function(obs,pred){ # simple function to compute the sum of the squared errors given the vector of observed values and the vector of predictions (the obs vector may also contain NAs)
+ # pred always holds all the annual predicted values, but some years may lack observations, so we remove the rows corresponding to the years with NA among the obs values:
+ years.right<-which(is.na(obs)==FALSE)
+ obs2<-obs[years.right]
+ pred2<-pred[years.right]
+ sum.scarti.quad<-sum((obs2-pred2)^2,na.rm=TRUE)
+ return(sum.scarti.quad)
+}
+
+
+
+
+
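+# Worked example (illustrative only) comparing the three agreement indices on the same
+# toy data as above; all are bounded above by 1 (perfect agreement), and the 2011
+# formulation is bounded in [-1, 1]:
+#
+#   ex.obs  <- c(10, 20, NA, 30)
+#   ex.pred <- c(12, 18, 25, 32)
+#   AGREE(ex.obs, ex.pred)         # ~0.985: classic quadratic d, rather lenient
+#   AGREE.1(ex.obs, ex.pred)       # ~0.857: absolute-value d1, more demanding
+#   AGREE.2011(ex.obs, ex.pred)    # 0.85: d of the 2011 formulation
+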
+#################################################################################################
+#                                          Others                                              #
+#################################################################################################
+
+
+
+# variant of boxplot.stats() whose whiskers span the 5th-95th percentile range instead of
+# 1.5*IQR, so 'out' holds only the values outside that range; 'coef' and 'do.out' are kept
+# in the signature for drop-in compatibility but are ignored
+myboxplot.stats <- function (x, coef = NULL, do.conf = TRUE, do.out = TRUE)
+{
+    nna <- !is.na(x)
+    n <- sum(nna)
+    stats <- quantile(x, c(.05,.25,.5,.75,.95), na.rm = TRUE)
+    iqr <- diff(stats[c(2, 4)])
+    out <- x < stats[1] | x > stats[5]
+    conf <- if (do.conf) stats[3] + c(-1.58, 1.58) * diff(stats[c(2, 4)])/sqrt(n)
+    list(stats = stats, n = n, conf = conf, out = x[out & nna])
+}
+
+# Load2: local copy of the Load() function of s2dverification, kept here so that its
+# internals can be inspected and modified without touching the installed package
+Load2 <- function (var, exp = NULL, obs = NULL, sdates, nmember = NULL, 
+    nmemberobs = NULL, nleadtime = NULL, leadtimemin = 1, leadtimemax = NULL, 
+    storefreq = "monthly", sampleperiod = 1, lonmin = 0, lonmax = 360, 
+    latmin = -90, latmax = 90, output = "areave", method = "conservative", 
+    grid = NULL, maskmod = vector("list", 15), maskobs = vector("list", 
+        15), configfile = NULL, varmin = NULL, varmax = NULL, 
+    silent = FALSE, nprocs = NULL, dimnames = NULL, remapcells = 2) 
+{
+    parameter_names <- ls()
+    if (length(parameter_names) < 3 || is.null(var) || is.null(sdates) || 
+        (is.null(exp) && is.null(obs))) {
+        stop("Error: At least 'var', 'exp'/'obs' and 'sdates' must be provided.")
+    }
+    load_parameters <- lapply(parameter_names, get, envir = environment())
+    names(load_parameters) <- parameter_names
+    parameters_to_show <- c("var", "exp", "obs", "sdates", "grid", 
+        "output", "storefreq")
+    load_parameters <- c(load_parameters[parameters_to_show], 
+        load_parameters[-match(parameters_to_show, names(load_parameters))])
+    cat(paste("* The load call you issued is:\n* Load(", paste(strwrap(paste(unlist(lapply(names(load_parameters[1:length(parameters_to_show)]), 
+        function(x) paste(x, "=", if (x == "sdates" && length(load_parameters[[x]]) > 
+            4) {
+            paste0("c('", load_parameters[[x]][1], "', '", load_parameters[[x]][2], 
+                "', ..., '", tail(load_parameters[[x]], 1), "')")
+        }
+        else {
+            paste(deparse(load_parameters[[x]]), collapse = "")
+        }))), collapse = ", "), width = getOption("width") - 
+        9, indent = 0, exdent = 8), collapse = "\n*"), ", ...)\n* See the full call in '$load_parameters' after Load() finishes.\n", 
+        sep = ""))
+    errors <- try({
+        if (is.null(var) || !(is.character(var) && nchar(var) > 
+            0)) {
+            stop("Error: parameter 'var' should be a character string of length >= 1.")
+        }
+        exps_to_fetch <- c()
+        exp_info_names <- c("name", "path", "nc_var_name", "suffix", 
+            "var_min", "var_max", "dimnames")
+        if (!is.null(exp) && !(is.character(exp) && all(nchar(exp) > 
+            0)) && !is.list(exp)) {
+            stop("Error: parameter 'exp' should be a vector of strings or a list with information of the experimental datasets to load. Check 'exp' in ?Load for details.")
+        }
+        else if (!is.null(exp)) {
+            if (!is.list(exp)) {
+                exp <- lapply(exp, function(x) list(name = x))
+            }
+            for (i in 1:length(exp)) {
+                if (!is.list(exp[[i]])) {
+                  stop("Error: parameter 'exp' is incorrect. It should be a list of lists.")
+                }
+                if (!(all(names(exp[[i]]) %in% exp_info_names))) {
+                  stop("Error: parameter 'exp' is incorrect. There are unrecognized components in the information of some of the experiments. Check 'exp' in ?Load for details.")
+                }
+                if (!("name" %in% names(exp[[i]]))) {
+                  exp[[i]][["name"]] <- paste0("exp", i)
+                  if (!("path" %in% names(exp[[i]]))) {
+                    stop("Error: parameter 'exp' is incorrect. A 'path' should be provided for each experimental dataset if no 'name' is provided. 
See 'exp' in ?Load for details.") + } + } + else if (!("path" %in% names(exp[[i]]))) { + exps_to_fetch <- c(exps_to_fetch, i) + } + if ("path" %in% names(exp[[i]])) { + if (!("nc_var_name" %in% names(exp[[i]]))) { + exp[[i]][["nc_var_name"]] <- "$VAR_NAME$" + } + if (!("suffix" %in% names(exp[[i]]))) { + exp[[i]][["suffix"]] <- "" + } + if (!("var_min" %in% names(exp[[i]]))) { + exp[[i]][["var_min"]] <- "" + } + if (!("var_max" %in% names(exp[[i]]))) { + exp[[i]][["var_max"]] <- "" + } + } + } + if ((length(exps_to_fetch) > 0) && (length(exps_to_fetch) < + length(exp))) { + cat("! Warning: 'path' was provided for some experimental datasets in 'exp'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + obs_to_fetch <- c() + obs_info_names <- c("name", "path", "nc_var_name", "suffix", + "var_min", "var_max") + if (!is.null(obs) && !(is.character(obs) && all(nchar(obs) > + 0)) && !is.list(obs)) { + stop("Error: parameter 'obs' should be a vector of strings or a list with information of the observational datasets to load. Check 'obs' in ?Load for details.") + } + else if (!is.null(obs)) { + if (!is.list(obs)) { + obs <- lapply(obs, function(x) list(name = x)) + } + for (i in 1:length(obs)) { + if (!is.list(obs[[i]])) { + stop("Error: parameter 'obs' is incorrect. It should be a list of lists.") + } + if (!(all(names(obs[[i]]) %in% obs_info_names))) { + stop("Error: parameter 'obs' is incorrect. There are unrecognized components in the information of some of the observations. Check 'obs' in ?Load for details.") + } + if (!("name" %in% names(obs[[i]]))) { + obs[[i]][["name"]] <- paste0("obs", i) + if (!("path" %in% names(obs[[i]]))) { + stop("Error: parameter 'obs' is incorrect. A 'path' should be provided for each observational dataset if no 'name' is provided. See 'obs' in ?Load for details.") + } + } + else if (!("path" %in% names(obs[[i]]))) { + obs_to_fetch <- c(obs_to_fetch, i) + } + if ("path" %in% names(obs[[i]])) { + if (!("nc_var_name" %in% names(obs[[i]]))) { + obs[[i]][["nc_var_name"]] <- "$VAR_NAME$" + } + if (!("suffix" %in% names(obs[[i]]))) { + obs[[i]][["suffix"]] <- "" + } + if (!("var_min" %in% names(obs[[i]]))) { + obs[[i]][["var_min"]] <- "" + } + if (!("var_max" %in% names(obs[[i]]))) { + obs[[i]][["var_max"]] <- "" + } + } + } + if (length(c(obs_to_fetch, exps_to_fetch) > 1) && + (length(obs_to_fetch) < length(obs))) { + cat("! Warning: 'path' was provided for some observational datasets in 'obs'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + if (is.null(sdates)) { + stop("Error: parameter 'sdates' must be provided.") + } + if (!is.character(sdates) || !all(nchar(sdates) == 8) || + any(is.na(strtoi(sdates)))) { + stop("Error: parameter 'sdates' is incorrect. All starting dates should be a character string in the format 'YYYYMMDD'.") + } + if (!is.null(nmember) && !is.null(exp)) { + if (!is.numeric(nmember)) { + stop("Error: parameter 'nmember' is incorrect. It should be numeric.") + } + if (length(nmember) == 1) { + cat(paste("! Warning: 'nmember' should specify the number of members of each experimental dataset. 
Forcing to", + nmember, "for all experiments.\n")) + nmember <- rep(nmember, length(exp)) + } + if (length(nmember) != length(exp)) { + stop("Error: 'nmember' must contain as many values as 'exp'.") + } + else if (any(is.na(nmember))) { + nmember[which(is.na(nmember))] <- max(nmember, + na.rm = TRUE) + } + } + if (!is.null(nmemberobs) && !is.null(obs)) { + if (!is.numeric(nmemberobs)) { + stop("Error: parameter 'nmemberobs' is incorrect. It should be numeric.") + } + if (length(nmemberobs) == 1) { + cat(paste("! Warning: 'nmemberobs' should specify the number of members of each observational dataset. Forcing to", + nmemberobs, "for all observations.\n")) + nmemberobs <- rep(nmemberobs, length(obs)) + } + if (length(nmemberobs) != length(obs)) { + stop("Error: 'nmemberobs' must contain as many values as 'obs'.") + } + else if (any(is.na(nmemberobs))) { + nmemberobs[which(is.na(nmemberobs))] <- max(nmemberobs, + na.rm = TRUE) + } + } + if (!is.null(nleadtime) && !is.numeric(nleadtime)) { + stop("Error: parameter 'nleadtime' is wrong. It should be numeric.") + } + if (is.null(leadtimemin) || !is.numeric(leadtimemin)) { + stop("Error: parameter 'leadtimemin' is wrong. It should be numeric.") + } + if (!is.null(leadtimemax) && !is.numeric(leadtimemax)) { + stop("Error: parameter 'leadtimemax' is wrong. It should be numeric.") + } + if (!is.character(storefreq) || !(storefreq %in% c("monthly", + "daily"))) { + stop("Error: parameter 'storefreq' is wrong, can take value 'daily' or 'monthly'.") + } + if (is.null(sampleperiod) || !is.numeric(sampleperiod)) { + stop("Error: parameter 'sampleperiod' is wrong. It should be numeric.") + } + if (is.null(lonmin) || !is.numeric(lonmin)) { + stop("Error: parameter 'lonmin' is wrong. It should be numeric.") + } + if (lonmin < -360 || lonmin > 360) { + stop("Error: parameter 'lonmin' must be in the range [-360, 360]") + } + if (lonmin < 0) { + lonmin <- lonmin + 360 + } + if (is.null(lonmax) || !is.numeric(lonmax)) { + stop("Error: parameter 'lonmax' is wrong. It should be numeric.") + } + if (lonmax < -360 || lonmax > 360) { + stop("Error: parameter 'lonmax' must be in the range [-360, 360]") + } + if (lonmax < 0) { + lonmax <- lonmax + 360 + } + if (is.null(latmin) || !is.numeric(latmin)) { + stop("Error: parameter 'latmin' is wrong. It should be numeric.") + } + if (latmin > 90 || latmin < -90) { + stop("Error: 'latmin' must be in the interval [-90, 90].") + } + if (is.null(latmax) || !is.numeric(latmax)) { + stop("Error: parameter 'latmax' is wrong. It should be numeric.") + } + if (latmax > 90 || latmax < -90) { + stop("Error: 'latmax' must be in the interval [-90, 90].") + } + if (is.null(output) || !(output %in% c("lonlat", "lon", + "lat", "areave"))) { + stop("Error: 'output' can only take values 'lonlat', 'lon', 'lat' or 'areave'.") + } + if (is.null(method) || !(method %in% c("bilinear", "bicubic", + "conservative", "distance-weighted"))) { + stop("Error: parameter 'method' is wrong, can take value 'bilinear', 'bicubic', 'conservative' or 'distance-weighted'.") + } + remap <- switch(method, bilinear = "remapbil", bicubic = "remapbic", + conservative = "remapcon", `distance-weighted` = "remapdis") + if (!is.null(grid)) { + if (is.character(grid)) { + supported_grids <- list("r[0-9]{1,}x[0-9]{1,}", + "t[0-9]{1,}grid") + grid_matches <- unlist(lapply(lapply(supported_grids, + regexpr, grid), .IsFullMatch, grid)) + if (sum(grid_matches) < 1) { + stop("The specified grid in the parameter 'grid' is incorrect. 
Must be one of rx or tgrid.") + } + } + else { + stop("Error: parameter 'grid' should be a character string, if specified.") + } + } + if (!is.list(maskmod)) { + stop("Error: parameter 'maskmod' must be a list.") + } + if (length(maskmod) < length(exp)) { + stop("Error: 'maskmod' must contain a numeric mask or NULL for each experiment in 'exp'.") + } + for (i in 1:length(maskmod)) { + if (is.list(maskmod[[i]])) { + if ((length(maskmod[[i]]) > 2) || !all(names(maskmod[[i]]) %in% + c("path", "nc_var_name"))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + else if (!(is.numeric(maskmod[[i]]) || is.null(maskmod[[i]]))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + if (!is.list(maskobs)) { + stop("Error: parameter 'maskobs' must be a list.") + } + if (length(maskobs) < length(obs)) { + stop("Error: 'maskobs' must contain a numeric mask or NULL for each obseriment in 'obs'.") + } + for (i in 1:length(maskobs)) { + if (is.list(maskobs[[i]])) { + if ((length(maskobs[[i]]) > 2) || !all(names(maskobs[[i]]) %in% + c("path", "nc_var_name"))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + else if (!(is.numeric(maskobs[[i]]) || is.null(maskobs[[i]]))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + if ((output != "areave" || !is.null(grid)) && length(exp) > + 0) { + if (!all(unlist(lapply(maskobs, is.null)))) { + cat("! Warning: 'maskobs' will be ignored. 'maskmod[[1]]' will be applied to observations instead.\n") + } + maskobs <- lapply(maskobs, function(x) x <- maskmod[[1]]) + } + if (is.null(configfile)) { + configfile <- system.file("config", "BSC.conf", package = "s2dverification") + } + else if (!is.character(configfile) || !(nchar(configfile) > + 0)) { + stop("Error: parameter 'configfile' must be a character string with the path to an s2dverification configuration file, if specified.") + } + if (!is.null(varmin) && !is.numeric(varmin)) { + stop("Error: parameter 'varmin' must be numeric, if specified.") + } + if (!is.null(varmax) && !is.numeric(varmax)) { + stop("Error: parameter 'varmax' must be numeric, if specified.") + } + if (!is.logical(silent)) { + stop("Error: parameter 'silent' must be TRUE or FALSE.") + } + if (!is.null(nprocs) && (!is.numeric(nprocs) || nprocs < + 1)) { + stop("Error: parameter 'nprocs' must be a positive integer, if specified.") + } + if (!is.null(dimnames) && (!is.list(dimnames))) { + stop("Error: parameter 'dimnames' must be a list, if specified.") + } + if (!all(names(dimnames) %in% c("member", "lat", "lon"))) { + stop("Error: parameter 'dimnames' is wrong. There are unrecognized component names. 
See 'dimnames' in ?Load for details.") + } + if (!is.numeric(remapcells) || remapcells < 0) { + stop("Error: 'remapcells' must be an integer >= 0.") + } + if (length(exps_to_fetch) > 0 || length(obs_to_fetch) > + 0) { + cat("* Some 'path's not explicitly provided in 'exp' and 'obs', so will now proceed to open the configuration file.\n") + data_info <- ConfigFileOpen(configfile, silent, TRUE) + matches <- ConfigApplyMatchingEntries(data_info, + var, sapply(exp[exps_to_fetch], "[[", "name"), + sapply(obs[obs_to_fetch], "[[", "name"), show_entries = FALSE, + show_result = FALSE) + replace_values <- data_info$definitions + if (!is.null(exp) && length(exps_to_fetch) > 0) { + counter <- 1 + exp[exps_to_fetch] <- lapply(matches$exp_info, + function(x) { + x[names(exp[[exps_to_fetch[counter]]])] <- exp[[exps_to_fetch[counter]]] + x[["path"]] <- paste0(x[["main_path"]], x[["file_path"]]) + counter <<- counter + 1 + x + }) + } + if (!is.null(obs) && length(obs_to_fetch) > 0) { + counter <- 1 + obs[obs_to_fetch] <- lapply(matches$obs_info, + function(x) { + x[names(obs[[obs_to_fetch[counter]]])] <- obs[[obs_to_fetch[counter]]] + x[["path"]] <- paste0(x[["main_path"]], x[["file_path"]]) + counter <<- counter + 1 + x + }) + } + if (!silent) { + cat("* All pairs (var, exp) and (var, obs) have matching entries.\n") + } + } + else { + replace_values <- list(DEFAULT_NC_VAR_NAME = "$VAR_NAME$", + DEFAULT_VAR_MIN = "", DEFAULT_VAR_MAX = "", DEFAULT_SUFFIX = "", + DEFAULT_DIM_NAME_LONGITUDES = "longitude", DEFAULT_DIM_NAME_LATITUDES = "latitude", + DEFAULT_DIM_NAME_MEMBERS = "ensemble") + } + dimnames <- list(lon = ifelse(is.null(dimnames[["lon"]]), + replace_values[["DEFAULT_DIM_NAME_LONGITUDES"]], + dimnames[["lon"]]), lat = ifelse(is.null(dimnames[["lat"]]), + replace_values[["DEFAULT_DIM_NAME_LATITUDES"]], dimnames[["lat"]]), + member = ifelse(is.null(dimnames[["member"]]), replace_values[["DEFAULT_DIM_NAME_MEMBERS"]], + dimnames[["member"]])) + if (!is.null(exp)) { + exp <- lapply(exp, function(x) { + if (!("dimnames" %in% names(x))) { + x[["dimnames"]] <- dimnames + x + } + else { + dimnames2 <- dimnames + dimnames2[names(x[["dimnames"]])] <- x[["dimnames"]] + x[["dimnames"]] <- dimnames2 + x + } + }) + } + if (!is.null(obs)) { + obs <- lapply(obs, function(x) { + if (!("dimnames" %in% names(x))) { + x[["dimnames"]] <- dimnames + x + } + else { + dimnames2 <- dimnames + dimnames2[names(x[["dimnames"]])] <- x[["dimnames"]] + x[["dimnames"]] <- dimnames2 + x + } + }) + } + single_dataset <- (length(obs) + length(exp) == 1) + replace_values[["VAR_NAME"]] <- var + replace_values[["STORE_FREQ"]] <- storefreq + latitudes <- longitudes <- NULL + leadtimes <- NULL + var_exp <- var_obs <- NULL + units <- var_long_name <- NULL + is_2d_var <- FALSE + nmod <- length(exp) + nobs <- length(obs) + nsdates <- length(sdates) + if (!silent) { + cat("* Fetching first experimental files to work out 'var_exp' size...\n") + } + dataset_type <- "exp" + dim_exp <- NULL + filename <- file_found <- tmp <- nltime <- NULL + dims2define <- TRUE + is_file_per_member_exp <- rep(nmod, FALSE) + exp_work_pieces <- list() + jmod <- 1 + while (jmod <= nmod) { + tags_to_find <- c("MEMBER_NUMBER") + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values[-position_of_tags], TRUE) + } + else { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values, TRUE) + 
} + is_file_per_member_exp[jmod] <- grepl("$MEMBER_NUMBER$", + quasi_final_path, fixed = TRUE) + replace_values[["EXP_NAME"]] <- exp[[jmod]][["name"]] + replace_values[["NC_VAR_NAME"]] <- exp[[jmod]][["nc_var_name"]] + namevar <- .ConfigReplaceVariablesInString(exp[[jmod]][["nc_var_name"]], + replace_values) + replace_values[["SUFFIX"]] <- exp[[jmod]][["suffix"]] + if (is.null(varmin)) { + mod_var_min <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][["var_min"]], + replace_values)) + } + else { + mod_var_min <- varmin + } + if (is.null(varmax)) { + mod_var_max <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][["var_max"]], + replace_values)) + } + else { + mod_var_max <- varmax + } + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + replace_values[["YEAR"]] <- substr(sdates[jsdate], + 1, 4) + replace_values[["MONTH"]] <- substr(sdates[jsdate], + 5, 6) + replace_values[["DAY"]] <- substr(sdates[jsdate], + 7, 8) + if (dims2define) { + if (is_file_per_member_exp[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- "*" + } + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, grid = grid, + remap = remap, remapcells = remapcells, is_file_per_member = is_file_per_member_exp[jmod], + is_file_per_dataset = FALSE, lon_limits = c(lonmin, + lonmax), lat_limits = c(latmin, latmax), + dimnames = exp[[jmod]][["dimnames"]], single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, + silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmember)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading data from a server but 'nmember' not specified. Loading only one member.\n") + nmember <- rep(1, nmod) + } + else { + nmember <- rep(found_dims[["member"]], + nmod) + } + } + if (is.null(nleadtime)) { + nleadtime <- found_dims[["time"]] + } + if (is.null(leadtimemax)) { + leadtimemax <- nleadtime + } + else if (leadtimemax > nleadtime) { + stop("Error: 'leadtimemax' argument is greater than the number of loaded leadtimes. 
Put first the experiment with the greatest number of leadtimes or adjust properly the parameters 'nleadtime' and 'leadtimemax'.") + } + leadtimes <- seq(leadtimemin, leadtimemax, + sampleperiod) + latitudes <- found_dims[["lat"]] + longitudes <- found_dims[["lon"]] + if (output == "lon" || output == "lonlat") { + dim_exp[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_exp[["lat"]] <- length(latitudes) + } + dim_exp[["time"]] <- length(leadtimes) + dim_exp[["member"]] <- max(nmember) + dim_exp[["sdate"]] <- nsdates + dim_exp[["dataset"]] <- nmod + dims2define <- FALSE + } + } + if (is_file_per_member_exp[jmod]) { + jmember <- 1 + while (jmember <= nmember[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", + (nmember[jmod]%/%10) + 1, "i", sep = ""), + jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, indices = c(1, + jmember, jsdate, jmod), nmember = nmember[jmod], + leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][["dimnames"]], + var_limits = c(mod_var_min, mod_var_max), + remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + jmember <- jmember + 1 + } + } + else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][["path"]], + replace_values), namevar = namevar, indices = c(1, + 1, jsdate, jmod), nmember = nmember[jmod], + leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][["dimnames"]], + var_limits = c(mod_var_min, mod_var_max), + remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + } + jsdate <- jsdate + 1 + } + jmod <- jmod + 1 + } + if (dims2define && length(exp) > 0) { + cat("! Warning: no data found in file system for any experimental dataset.\n") + } + dims <- dim_exp[na.omit(match(c("dataset", "member", + "sdate", "time", "lat", "lon"), names(dim_exp)))] + if (is.null(dims[["member"]]) || any(is.na(unlist(dims))) || + any(unlist(dims) == 0)) { + dims <- 0 + dim_exp <- NULL + } + if (!silent) { + message <- "* Success. Detected dimensions of experimental data: " + cat(paste0(message, paste(unlist(dims), collapse = ", "), + "\n")) + cat("* Fetching first observational files to work out 'var_obs' size...\n") + } + if (is.null(exp) || dims == 0) { + if (is.null(leadtimemax)) { + cat("! Warning: loading observations only and no 'leadtimemax' specified. 
Data will be loaded from each starting date to current time.\n") + diff <- Sys.time() - as.POSIXct(paste(substr(sdates[1], + 1, 4), "-", substr(sdates[1], 5, 6), "-", substr(sdates[1], + 7, 8), sep = "")) + if (storefreq == "monthly") { + leadtimemax <- as.integer(diff/30) + } + else { + leadtimemax <- as.integer(diff) + } + } + if (is.null(nleadtime)) { + nleadtime <- leadtimemax + } + leadtimes <- seq(leadtimemin, leadtimemax, sampleperiod) + } + dataset_type <- "obs" + dim_obs <- NULL + dims2define <- TRUE + lat_indices <- lon_indices <- NULL + obs_work_pieces <- list() + is_file_per_dataset_obs <- rep(FALSE, nobs) + is_file_per_member_obs <- rep(FALSE, nobs) + jobs <- 1 + while (jobs <= nobs) { + tags_to_find <- c("MONTH", "DAY", "YEAR", "MEMBER_NUMBER") + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values[-position_of_tags], TRUE) + } + else { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values, TRUE) + } + is_file_per_dataset_obs[jobs] <- !any(sapply(c("$MONTH$", + "$DAY$", "$YEAR$"), grepl, quasi_final_path, + fixed = TRUE)) + is_file_per_member_obs[jobs] <- grepl("$MEMBER_NUMBER$", + quasi_final_path, fixed = TRUE) + replace_values[["OBS_NAME"]] <- obs[[jobs]][["name"]] + replace_values[["NC_VAR_NAME"]] <- obs[[jobs]][["nc_var_name"]] + namevar <- .ConfigReplaceVariablesInString(obs[[jobs]][["nc_var_name"]], + replace_values) + replace_values[["SUFFIX"]] <- obs[[jobs]][["suffix"]] + if (is.null(varmin)) { + obs_var_min <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][["var_min"]], + replace_values)) + } + else { + obs_var_min <- varmin + } + if (is.null(varmax)) { + obs_var_max <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][["var_max"]], + replace_values)) + } + else { + obs_var_max <- varmax + } + if (is_file_per_dataset_obs[jobs]) { + if (dims2define) { + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, grid = grid, + remap = remap, remapcells = remapcells, is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), lat_limits = c(latmin, + latmax), dimnames = obs[[jobs]][["dimnames"]], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, + silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. 
Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } + else { + nmemberobs <- rep(found_dims[["member"]], + nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[["lon"]] + latitudes <- found_dims[["lat"]] + } + if (output == "lon" || output == "lonlat") { + dim_obs[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_obs[["lat"]] <- length(latitudes) + } + dim_obs[["time"]] <- length(leadtimes) + dim_obs[["member"]] <- max(nmemberobs) + dim_obs[["sdate"]] <- nsdates + dim_obs[["dataset"]] <- nobs + dims2define <- FALSE + } + } + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, indices = c(1, + 1, 1, jobs), nmember = nmemberobs[jobs], mask = maskobs[[jobs]], + leadtimes = leadtimes, is_file_per_dataset = is_file_per_dataset_obs[jobs], + startdates = sdates, dimnames = obs[[jobs]][["dimnames"]], + var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } + else { + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + sdate <- sdates[jsdate] + if (storefreq == "daily") { + day <- substr(sdate, 7, 8) + if (day == "") { + day <- "01" + } + day <- as.integer(day) + startdate <- as.POSIXct(paste(substr(sdate, + 1, 4), "-", substr(sdate, 5, 6), "-", day, + " 12:00:00", sep = "")) + (leadtimemin - + 1) * 86400 + year <- as.integer(substr(startdate, 1, 4)) + month <- as.integer(substr(startdate, 6, + 7)) + } + else { + month <- (as.integer(substr(sdate, 5, 6)) + + leadtimemin - 2)%%12 + 1 + year <- as.integer(substr(sdate, 1, 4)) + + (as.integer(substr(sdate, 5, 6)) + leadtimemin - + 2)%/%12 + } + jleadtime <- 1 + while (jleadtime <= length(leadtimes)) { + replace_values[["YEAR"]] <- paste(year, "", + sep = "") + replace_values[["MONTH"]] <- sprintf("%2.2i", + month) + if (storefreq == "daily") { + replace_values[["DAY"]] <- sprintf("%2.2i", + day) + days_in_month <- ifelse(LeapYear(year), + 29, 28) + days_in_month <- switch(paste(month, "", + sep = ""), `1` = 31, `3` = 31, `4` = 30, + `5` = 31, `6` = 30, `7` = 31, `8` = 31, + `9` = 30, `10` = 31, `11` = 30, `12` = 31, + days_in_month) + obs_file_indices <- seq(day, min(days_in_month, + (length(leadtimes) - jleadtime) * sampleperiod + + day), sampleperiod) + } + else { + obs_file_indices <- 1 + } + if (dims2define) { + if (is_file_per_member_obs[jobs]) { + replace_values[["MEMBER_NUMBER"]] <- "*" + } + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, + grid = grid, remap = remap, remapcells = remapcells, + is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), lat_limits = c(latmin, + latmax), dimnames = obs[[jobs]][["dimnames"]], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, + explore_dims = TRUE, silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != "areave")) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. 
Forcing to 'areave'\n.", + sep = "")) + output <- "areave" + } + if (output != "areave" && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[["member"]])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } + else { + nmemberobs <- rep(found_dims[["member"]], + nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[["lon"]] + latitudes <- found_dims[["lat"]] + } + if (output == "lon" || output == "lonlat") { + dim_obs[["lon"]] <- length(longitudes) + } + if (output == "lat" || output == "lonlat") { + dim_obs[["lat"]] <- length(latitudes) + } + dim_obs[["time"]] <- length(leadtimes) + dim_obs[["member"]] <- max(nmemberobs) + dim_obs[["sdate"]] <- nsdates + dim_obs[["dataset"]] <- nobs + dims2define <- FALSE + } + } + if (is_file_per_member_obs[jobs]) { + jmember <- 1 + while (jmember <= nmemberobs[jobs]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", + (nmemberobs[jobs]%/%10) + 1, "i", sep = ""), + jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, + indices = c(jleadtime, jmember, jsdate, + jobs), nmember = nmemberobs[jobs], + leadtimes = obs_file_indices, mask = maskobs[[jobs]], + dimnames = obs[[jobs]][["dimnames"]], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + var_limits = c(obs_var_min, obs_var_max), + remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, + list(work_piece)) + jmember <- jmember + 1 + } + } + else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][["path"]], + replace_values), namevar = namevar, indices = c(jleadtime, + 1, jsdate, jobs), nmember = nmemberobs[jobs], + leadtimes = obs_file_indices, mask = maskobs[[jobs]], + dimnames = obs[[jobs]][["dimnames"]], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + var_limits = c(obs_var_min, obs_var_max), + remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } + if (storefreq == "daily") { + startdate <- startdate + 86400 * sampleperiod * + length(obs_file_indices) + year <- as.integer(substr(startdate, 1, + 4)) + month <- as.integer(substr(startdate, 6, + 7)) + day <- as.integer(substr(startdate, 9, + 10)) + } + else { + month <- month + sampleperiod + year <- year + (month - 1)%/%12 + month <- (month - 1)%%12 + 1 + } + jleadtime <- jleadtime + length(obs_file_indices) + } + jsdate <- jsdate + 1 + } + } + jobs <- jobs + 1 + } + if (dims2define && length(obs) > 0) { + cat("! Warning: no data found in file system for any observational dataset.\n") + } + dims <- dim_obs[na.omit(match(c("dataset", "member", + "sdate", "time", "lat", "lon"), names(dim_obs)))] + if (is.null(dims[["member"]]) || any(is.na(unlist(dims))) || + any(unlist(dims) == 0)) { + dims <- 0 + dim_obs <- NULL + } + if (!silent) { + message <- "* Success. 
Detected dimensions of observational data: " + cat(paste0(message, paste(unlist(dims), collapse = ", "), + "\n")) + } + if (!(is.null(dim_obs) && is.null(dim_exp))) { + pointer_var_exp <- pointer_var_obs <- NULL + if (!is.null(dim_exp) && (length(unlist(dim_exp)) == + length(dim_exp)) && !any(is.na(unlist(dim_exp))) && + !any(unlist(dim_exp) == 0)) { + var_exp <- big.matrix(nrow = prod(unlist(dim_exp)), + ncol = 1) + pointer_var_exp <- describe(var_exp) + } + if (!is.null(dim_obs) && (length(unlist(dim_obs)) == + length(dim_obs)) && !any(is.na(unlist(dim_obs))) && + !any(unlist(dim_obs) == 0)) { + var_obs <- big.matrix(nrow = prod(unlist(dim_obs)), + ncol = 1) + pointer_var_obs <- describe(var_obs) + } + if (is.null(nprocs)) { + nprocs <- detectCores() + } + exp_work_piece_percent <- prod(dim_exp)/(prod(dim_obs) + + prod(dim_exp)) + obs_work_piece_percent <- prod(dim_obs)/(prod(dim_obs) + + prod(dim_exp)) + exp_work_pieces <- lapply(exp_work_pieces, function(x) c(x, + list(dataset_type = "exp", dims = dim_exp, out_pointer = pointer_var_exp))) + obs_work_pieces <- lapply(obs_work_pieces, function(x) c(x, + list(dataset_type = "obs", dims = dim_obs, out_pointer = pointer_var_obs))) + work_pieces <- c(exp_work_pieces, obs_work_pieces) + if (length(work_pieces)/nprocs >= 2 && !silent) { + if (length(work_pieces)/nprocs < 10) { + amount <- 100/ceiling(length(work_pieces)/nprocs) + reps <- ceiling(length(work_pieces)/nprocs) + } + else { + amount <- 10 + reps <- 10 + } + progress_steps <- rep(amount, reps) + if (length(exp_work_pieces) == 0) { + selected_exp_pieces <- c() + } + else if (length(exp_work_pieces) < floor(reps * + exp_work_piece_percent) + 1) { + selected_exp_pieces <- length(exp_work_pieces) + progress_steps <- c(sum(head(progress_steps, + floor(reps * exp_work_piece_percent))), tail(progress_steps, + ceiling(reps * obs_work_piece_percent))) + } + else { + selected_exp_pieces <- round(seq(1, length(exp_work_pieces), + length.out = floor(reps * exp_work_piece_percent) + + 1))[-1] + } + if (length(obs_work_pieces) == 0) { + selected_obs_pieces <- c() + } + else if (length(obs_work_pieces) < ceiling(reps * + obs_work_piece_percent) + 1) { + selected_obs_pieces <- length(obs_work_pieces) + progress_steps <- c(head(progress_steps, floor(reps * + exp_work_piece_percent)), sum(tail(progress_steps, + ceiling(reps * obs_work_piece_percent)))) + } + else { + selected_obs_pieces <- round(seq(1, length(obs_work_pieces), + length.out = ceiling(reps * obs_work_piece_percent) + + 1))[-1] + } + selected_pieces <- c(selected_exp_pieces, selected_obs_pieces + + length(exp_work_pieces)) + progress_steps <- paste0(" + ", round(progress_steps, + 2), "%") + progress_message <- "* Progress: 0%" + } + else { + progress_message <- "" + selected_pieces <- NULL + } + piece_counter <- 1 + step_counter <- 1 + work_pieces <- lapply(work_pieces, function(x) { + wp <- c(x, list(is_2d_var = is_2d_var, grid = grid, + remap = remap, lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), output = output, + remapcells = remapcells, single_dataset = single_dataset)) + if (piece_counter %in% selected_pieces) { + wp <- c(wp, list(progress_amount = progress_steps[step_counter])) + step_counter <<- step_counter + 1 + } + piece_counter <<- piece_counter + 1 + wp + }) + if (!silent) { + cat(paste("* Will now proceed to read and process ", + length(work_pieces), " data files:\n", sep = "")) + if (length(work_pieces) < 30) { + lapply(work_pieces, function(x) cat(paste("* ", + x[["filename"]], "\n", sep = ""))) + } + 
else { + cat(paste("* The list of files is long. You can check it after Load() finishes in the output '$source_files'.\n")) + } + if (length(dim_obs) == 0) { + bytes_obs <- 0 + obs_dim_sizes <- "0" + } + else { + bytes_obs <- prod(c(dim_obs, 8)) + obs_dim_sizes <- paste(na.omit(as.vector(dim_obs[c("dataset", + "member", "sdate", "time", "lat", "lon")])), + collapse = " x ") + } + if (length(dim_exp) == 0) { + bytes_exp <- 0 + exp_dim_sizes <- "0" + } + else { + bytes_exp <- prod(c(dim_exp, 8)) + exp_dim_sizes <- paste(na.omit(as.vector(dim_exp[c("dataset", + "member", "sdate", "time", "lat", "lon")])), + collapse = " x ") + } + cat(paste("* Total size of requested data: ", + bytes_obs + bytes_exp, "bytes.\n")) + cat(paste("* - Experimental data: (", exp_dim_sizes, + ") x 8 bytes =", bytes_exp, "bytes.\n")) + cat(paste("* - Observational data: (", obs_dim_sizes, + ") x 8 bytes =", bytes_obs, "bytes.\n")) + cat(paste("* If size of requested data is close to or above the free shared RAM memory, R will crash.\n")) + } + if (nprocs == 1) { + found_files <- lapply(work_pieces, .LoadDataFile, + silent = silent) + } + else { + cluster <- makeCluster(nprocs, outfile = "") + if (!silent) { + cat(paste("* Loading... This may take several minutes...\n", + sep = "")) + cat(progress_message) + } + work_errors <- try({ + found_files <- clusterApplyLB(cluster, work_pieces, + .LoadDataFile, silent = silent) + }) + stopCluster(cluster) + } + if (!silent) { + if (progress_message != "") { + cat("\n") + } + if (any(unlist(lapply(found_files, is.null)))) { + if (sum(unlist(lapply(found_files, is.null))) < + 30) { + cat("! WARNING: The following files were not found in the file system. Filling with NA values instead.\n") + lapply(work_pieces[which(unlist(lapply(found_files, + is.null)))], function(x) cat(paste("* ", + x[["filename"]], "\n", sep = ""))) + } + else { + cat("! WARNING: Some files were not found in the file system. The list is long. You can check it in the output '$not_found_files'. Filling with NA values instead.\n") + } + } + } + source_files <- unlist(found_files[which(!unlist(lapply(found_files, + is.null)))]) + not_found_files <- unlist(lapply(work_pieces[which(unlist(lapply(found_files, + is.null)))], "[[", "filename")) + } + else { + error_message <- "Error: No found files for any dataset. 
Check carefully the file patterns and correct either the pattern or the provided parameters:\n" + if (!is.null(exp)) { + lapply(exp, function(x) error_message <<- paste0(error_message, + paste0(x[["path"]], "\n"))) + } + if (!is.null(obs)) { + lapply(obs, function(x) error_message <<- paste0(error_message, + paste0(x[["path"]], "\n"))) + } + stop(error_message) + } + }) + if (class(errors) == "try-error") { + invisible(list(load_parameters = load_parameters)) + } + else { + variable <- list() + variable[["varName"]] <- var + variable[["level"]] <- NULL + attr(variable, "is_standard") <- FALSE + attr(variable, "units") <- units + attr(variable, "longname") <- var_long_name + attr(variable, "daily_agg_cellfun") <- "none" + attr(variable, "monthly_agg_cellfun") <- "none" + attr(variable, "verification_time") <- "none" + if (is.null(var_exp)) { + mod_data <- NULL + } + else { + dim_reorder <- length(dim_exp):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_exp + dim_exp <- dim_exp[dim_reorder] + mod_data <- aperm(array(bigmemory::as.matrix(var_exp), + dim = old_dims), dim_reorder) + attr(mod_data, "dimensions") <- names(dim_exp) + } + if (is.null(var_obs)) { + obs_data <- NULL + } + else { + dim_reorder <- length(dim_obs):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_obs + dim_obs <- dim_obs[dim_reorder] + obs_data <- aperm(array(bigmemory::as.matrix(var_obs), + dim = old_dims), dim_reorder) + attr(obs_data, "dimensions") <- names(dim_obs) + } + if (is.null(latitudes)) { + lat <- 0 + attr(lat, "cdo_grid_name") <- "none" + } + else { + lat <- latitudes + attr(lat, "cdo_grid_name") <- if (is.null(grid)) + "none" + else grid + } + attr(lat, "projection") <- "none" + if (is.null(longitudes)) { + lon <- 0 + attr(lon, "cdo_grid_name") <- "none" + } + else { + lon <- longitudes + attr(lon, "cdo_grid_name") <- if (is.null(grid)) + "none" + else grid + } + attr(lon, "projection") <- "none" + dates <- list() + dates[["start"]] <- NULL + dates[["end"]] <- NULL + models <- NULL + if (length(exp) > 0 && !is.null(dim_exp)) { + models <- list() + for (jmod in 1:length(exp)) { + models[[exp[[jmod]][["name"]]]] <- list(members = paste0("Member_", + 1:nmember[jmod]), source = if ((nchar(exp[[jmod]][["path"]]) - + nchar(gsub("/", "", exp[[jmod]][["path"]])) > + 2) && (length(sdates) > 1 && !is_file_per_member_exp[jmod])) { + parts <- strsplit(exp[[jmod]][["path"]], "/")[[1]] + paste(parts[-length(parts)], sep = "", collapse = "/") + } else { + exp[[jmod]][["path"]] + }) + } + } + observations <- NULL + if (length(obs) > 0 && !is.null(dim_obs)) { + observations <- list() + for (jobs in 1:length(obs)) { + observations[[obs[[jobs]][["name"]]]] <- list(members = paste0("Member_", + 1:nmemberobs[jobs]), source = if ((nchar(obs[[jobs]][["path"]]) - + nchar(gsub("/", "", obs[[jobs]][["path"]])) > + 2) && !is_file_per_dataset_obs[jobs]) { + parts <- strsplit(obs[[jobs]][["path"]], "/")[[1]] + paste(parts[-length(parts)], sep = "", collapse = "/") + } else { + obs[[jobs]][["path"]] + }) + } + } + invisible(list(mod = mod_data, obs = obs_data, lon = lon, + lat = lat, Variable = variable, Datasets = list(exp = models, + obs = observations), Dates = dates, InitializationDates = lapply(sdates, + function(x) { + sink("/dev/null") + date <- print(as.POSIXct(as.Date(x, format = "%Y%m%d"))) + sink() + date + }), when = Sys.time(), source_files = source_files, + not_found_files = not_found_files, load_parameters = load_parameters)) + } +} + +mergePDF <- function(..., file, gsversion = NULL, in.file = NULL) 
{
+    if (is.null(in.file)) {
+        in.file <- substitute(...())
+    }
+    infiles <- paste(unlist(lapply(in.file, function(y) as.character(y))), 
+        collapse = " ")
+    if (is.null(gsversion)) {
+        gsversion <- names(which(Sys.which(c("gs", "gswin32c", "gswin64c")) != ""))
+        if (length(gsversion) == 0) 
+            stop("Please install Ghostscript and ensure it is in your PATH")
+        if (length(gsversion) > 1) 
+            stop("More than one Ghostscript executable was found: ", 
+                paste(gsversion, collapse = " "), 
+                ". Please specify which version should be used with the gsversion argument")
+    }
+    # Ghostscript flags: batch mode, no pausing, quiet, merge all input PDFs into 'file'
+    pre <- " -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile="
+    system(paste(paste(gsversion, pre, file, sep = ""), infiles, collapse = " "))
+}
diff --git a/old/SkillScores_MN_v1.R b/old/SkillScores_MN_v1.R
new file mode 100644
index 0000000000000000000000000000000000000000..e827678ebe3b582d7d9ceb5465b29a544b24d5a4
--- /dev/null
+++ b/old/SkillScores_MN_v1.R
@@ -0,0 +1,2147 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+
+library(s2dverification)
+library(ncdf4)
+library(plyr)
+
+#source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')
+#source('/gpfs/projects/bsc32/bsc32842/scripts/Utils.R')
+source('/home/Earth/ncortesi/Downloads/scripts/Utils.R')
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+#workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/MN"
+
+args <- commandArgs(TRUE) # store in 'args' the list of optional arguments the script was run with from the terminal
+chunk <- as.integer(args[1]) # number of the chunk to run in this script
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+
+# generic path of the forecast system files:
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly <- list(path = '/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/sfcWind_f6h/2014010200/sfcWind_2013010200.nc')
+#ECMWF_monthly <- list(path = paste0(workdir,'/2014010200/sfcWind_2013010200.nc'))
+
+#system("module load GCC")
+#system("module load HDF5")
+#system("module load CDO")
+
+# load one file once to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+#time <- system.time({
+var <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates='20130102', leadtimemin = 1, leadtimemax=4, output = 'lonlat', nprocs=1)
+#})
+#save(time, file=paste0(workdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData'))
+#write.table(var$lon, var$lat, file=paste0(workdir,'/load_coord.txt'))
+
+
+a=F;if(a){ # debug block: step-by-step execution of the internals of Load() with the parameters below; disabled by default (set a=T to run it)
+
+var='sfcWind'
+exp=list(ECMWF_monthly)
+obs=NULL
+sdates='20130102'
+nmember=NULL
+nmemberobs=NULL
+nleadtime=NULL
+leadtimemin=1
+leadtimemax=4
+storefreq='monthly'
+sampleperiod=1
+lonmin=0
+lonmax=360
+latmin=-90
+latmax=90
+output='lonlat'
+method='conservative'
+grid=NULL
+maskmod=vector('list',15)
+maskobs=vector('list',15)
+configfile=NULL
+varmin=NULL
+varmax=NULL
+silent=FALSE
+nprocs=1
+dimnames=NULL
+remapcells=2
+
+test='test'
+
+  # Print a stamp of the call the user issued. 
+ parameter_names <- ls() + if (length(parameter_names) < 3 || is.null(var) || + is.null(sdates) || (is.null(exp) && is.null(obs))) { + stop("Error: At least 'var', 'exp'/'obs' and 'sdates' must be provided.") + } + load_parameters <- lapply(parameter_names, get, envir = environment()) + names(load_parameters) <- parameter_names + +#write.table(load_parameters$exp, file=paste0(workdir,'/load_parameters.txt')) + + + parameters_to_show <- c('var', 'exp', 'obs', 'sdates', 'grid', 'output', 'storefreq') + load_parameters <- c(load_parameters[parameters_to_show], load_parameters[-match(parameters_to_show, names(load_parameters))]) + cat(paste("* The load call you issued is:\n* Load(", + paste(strwrap( + paste(unlist(lapply(names(load_parameters[1:length(parameters_to_show)]), + function(x) paste(x, '=', + if (x == 'sdates' && length(load_parameters[[x]]) > 4) { + paste0("c('", load_parameters[[x]][1], "', '", load_parameters[[x]][2], + "', ..., '", tail(load_parameters[[x]], 1), "')") + } else { + paste(deparse(load_parameters[[x]]), collapse = '') + }))), + collapse = ', '), width = getOption('width') - 9, indent = 0, exdent = 8), collapse = '\n*'), + ", ...)\n* See the full call in '$load_parameters' after Load() finishes.\n", sep = '')) + + + # Run Load() error-aware, so that it always returns something + errors <- try({ + + # Check and sanitize parameters + # var + if (is.null(var) || !(is.character(var) && nchar(var) > 0)) { + stop("Error: parameter 'var' should be a character string of length >= 1.") + } + + # exp + exps_to_fetch <- c() + exp_info_names <- c('name', 'path', 'nc_var_name', 'suffix', + 'var_min', 'var_max', 'dimnames') + if (!is.null(exp) && !(is.character(exp) && all(nchar(exp) > 0)) && !is.list(exp)) { + stop("Error: parameter 'exp' should be a vector of strings or a list with information of the experimental datasets to load. Check 'exp' in ?Load for details.") + } else if (!is.null(exp)) { + if (!is.list(exp)) { + exp <- lapply(exp, function (x) list(name = x)) + } + for (i in 1:length(exp)) { + if (!is.list(exp[[i]])) { + stop("Error: parameter 'exp' is incorrect. It should be a list of lists.") + } + if (!(all(names(exp[[i]]) %in% exp_info_names))) { + stop("Error: parameter 'exp' is incorrect. There are unrecognized components in the information of some of the experiments. Check 'exp' in ?Load for details.") + } + if (!('name' %in% names(exp[[i]]))) { + exp[[i]][['name']] <- paste0('exp', i) + if (!('path' %in% names(exp[[i]]))) { + stop("Error: parameter 'exp' is incorrect. A 'path' should be provided for each experimental dataset if no 'name' is provided. See 'exp' in ?Load for details.") + } + } else if (!('path' %in% names(exp[[i]]))) { + exps_to_fetch <- c(exps_to_fetch, i) + } + if ('path' %in% names(exp[[i]])) { + if (!('nc_var_name' %in% names(exp[[i]]))) { + exp[[i]][['nc_var_name']] <- '$VAR_NAME$' + } + if (!('suffix' %in% names(exp[[i]]))) { + exp[[i]][['suffix']] <- '' + } + if (!('var_min' %in% names(exp[[i]]))) { + exp[[i]][['var_min']] <- '' + } + if (!('var_max' %in% names(exp[[i]]))) { + exp[[i]][['var_max']] <- '' + } + } + } + if ((length(exps_to_fetch) > 0) && (length(exps_to_fetch) < length(exp))) { + cat("! Warning: 'path' was provided for some experimental datasets in 'exp'. 
Any \n* information in the configuration file related to these will be ignored.\n") + } + } + + # obs + obs_to_fetch <- c() + obs_info_names <- c('name', 'path', 'nc_var_name', 'suffix', + 'var_min', 'var_max') + if (!is.null(obs) && !(is.character(obs) && all(nchar(obs) > 0)) && !is.list(obs)) { + stop("Error: parameter 'obs' should be a vector of strings or a list with information of the observational datasets to load. Check 'obs' in ?Load for details.") + } else if (!is.null(obs)) { + if (!is.list(obs)) { + obs <- lapply(obs, function (x) list(name = x)) + } + for (i in 1:length(obs)) { + if (!is.list(obs[[i]])) { + stop("Error: parameter 'obs' is incorrect. It should be a list of lists.") + } + if (!(all(names(obs[[i]]) %in% obs_info_names))) { + stop("Error: parameter 'obs' is incorrect. There are unrecognized components in the information of some of the observations. Check 'obs' in ?Load for details.") + } + if (!('name' %in% names(obs[[i]]))) { + obs[[i]][['name']] <- paste0('obs', i) + if (!('path' %in% names(obs[[i]]))) { + stop("Error: parameter 'obs' is incorrect. A 'path' should be provided for each observational dataset if no 'name' is provided. See 'obs' in ?Load for details.") + } + } else if (!('path' %in% names(obs[[i]]))) { + obs_to_fetch <- c(obs_to_fetch, i) + } + if ('path' %in% names(obs[[i]])) { + if (!('nc_var_name' %in% names(obs[[i]]))) { + obs[[i]][['nc_var_name']] <- '$VAR_NAME$' + } + if (!('suffix' %in% names(obs[[i]]))) { + obs[[i]][['suffix']] <- '' + } + if (!('var_min' %in% names(obs[[i]]))) { + obs[[i]][['var_min']] <- '' + } + if (!('var_max' %in% names(obs[[i]]))) { + obs[[i]][['var_max']] <- '' + } + } + } + if (length(c(obs_to_fetch, exps_to_fetch) > 1) && (length(obs_to_fetch) < length(obs))) { + cat("! Warning: 'path' was provided for some observational datasets in 'obs'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + + # sdates + if (is.null(sdates)) { + stop("Error: parameter 'sdates' must be provided.") + } + if (!is.character(sdates) || !all(nchar(sdates) == 8) || any(is.na(strtoi(sdates)))) { + stop("Error: parameter 'sdates' is incorrect. All starting dates should be a character string in the format 'YYYYMMDD'.") + } + + # nmember + if (!is.null(nmember) && !is.null(exp)) { + if (!is.numeric(nmember)) { + stop("Error: parameter 'nmember' is incorrect. It should be numeric.") + } + if (length(nmember) == 1) { + cat(paste("! Warning: 'nmember' should specify the number of members of each experimental dataset. Forcing to", nmember, "for all experiments.\n")) + nmember <- rep(nmember, length(exp)) + } + if (length(nmember) != length(exp)) { + stop("Error: 'nmember' must contain as many values as 'exp'.") + } else if (any(is.na(nmember))) { + nmember[which(is.na(nmember))] <- max(nmember, na.rm = TRUE) + } + } + + # nmemberobs + if (!is.null(nmemberobs) && !is.null(obs)) { + if (!is.numeric(nmemberobs)) { + stop("Error: parameter 'nmemberobs' is incorrect. It should be numeric.") + } + if (length(nmemberobs) == 1) { + cat(paste("! Warning: 'nmemberobs' should specify the number of members of each observational dataset. 
Forcing to", nmemberobs, "for all observations.\n")) + nmemberobs <- rep(nmemberobs, length(obs)) + } + if (length(nmemberobs) != length(obs)) { + stop("Error: 'nmemberobs' must contain as many values as 'obs'.") + } else if (any(is.na(nmemberobs))) { + nmemberobs[which(is.na(nmemberobs))] <- max(nmemberobs, na.rm = TRUE) + } + } + + # nleadtime + if (!is.null(nleadtime) && !is.numeric(nleadtime)) { + stop("Error: parameter 'nleadtime' is wrong. It should be numeric.") + } + + # leadtimemin + if (is.null(leadtimemin) || !is.numeric(leadtimemin)) { + stop("Error: parameter 'leadtimemin' is wrong. It should be numeric.") + } + + # leadtimemax + if (!is.null(leadtimemax) && !is.numeric(leadtimemax)) { + stop("Error: parameter 'leadtimemax' is wrong. It should be numeric.") + } + + # storefreq + if (!is.character(storefreq) || !(storefreq %in% c('monthly', 'daily'))) { + stop("Error: parameter 'storefreq' is wrong, can take value 'daily' or 'monthly'.") + } + + # sampleperiod + if (is.null(sampleperiod) || !is.numeric(sampleperiod)) { + stop("Error: parameter 'sampleperiod' is wrong. It should be numeric.") + } + + # lonmin + if (is.null(lonmin) || !is.numeric(lonmin)) { + stop("Error: parameter 'lonmin' is wrong. It should be numeric.") + } + if (lonmin < -360 || lonmin > 360) { + stop("Error: parameter 'lonmin' must be in the range [-360, 360]") + } + if (lonmin < 0) { + lonmin <- lonmin + 360 + } + + # lonmax + if (is.null(lonmax) || !is.numeric(lonmax)) { + stop("Error: parameter 'lonmax' is wrong. It should be numeric.") + } + if (lonmax < -360 || lonmax > 360) { + stop("Error: parameter 'lonmax' must be in the range [-360, 360]") + } + if (lonmax < 0) { + lonmax <- lonmax + 360 + } + + # latmin + if (is.null(latmin) || !is.numeric(latmin)) { + stop("Error: parameter 'latmin' is wrong. It should be numeric.") + } + if (latmin > 90 || latmin < -90) { + stop("Error: 'latmin' must be in the interval [-90, 90].") + } + + # latmax + if (is.null(latmax) || !is.numeric(latmax)) { + stop("Error: parameter 'latmax' is wrong. It should be numeric.") + } + if (latmax > 90 || latmax < -90) { + stop("Error: 'latmax' must be in the interval [-90, 90].") + } + + # output + if (is.null(output) || !(output %in% c('lonlat', 'lon', 'lat', 'areave'))) { + stop("Error: 'output' can only take values 'lonlat', 'lon', 'lat' or 'areave'.") + } + + # method + if (is.null(method) || !(method %in% c('bilinear', 'bicubic', 'conservative', 'distance-weighted'))) { + stop("Error: parameter 'method' is wrong, can take value 'bilinear', 'bicubic', 'conservative' or 'distance-weighted'.") + } + remap <- switch(method, 'bilinear' = 'remapbil', 'bicubic' = 'remapbic', + 'conservative' = 'remapcon', 'distance-weighted' = 'remapdis') + + # grid + if (!is.null(grid)) { + if (is.character(grid)) { + supported_grids <- list('r[0-9]{1,}x[0-9]{1,}', 't[0-9]{1,}grid') + grid_matches <- unlist(lapply(lapply(supported_grids, regexpr, grid), .IsFullMatch, grid)) + if (sum(grid_matches) < 1) { + stop("The specified grid in the parameter 'grid' is incorrect. 
Must be one of r<NX>x<NY> or t<RES>grid.")
+       }
+     } else {
+       stop("Error: parameter 'grid' should be a character string, if specified.")
+     }
+   }
+
+   # maskmod
+   if (!is.list(maskmod)) {
+     stop("Error: parameter 'maskmod' must be a list.")
+   }
+   if (length(maskmod) < length(exp)) {
+     stop("Error: 'maskmod' must contain a numeric mask or NULL for each experiment in 'exp'.")
+   }
+   for (i in 1:length(maskmod)) {
+     if (is.list(maskmod[[i]])) {
+       if ((length(maskmod[[i]]) > 2) || !all(names(maskmod[[i]]) %in% c('path', 'nc_var_name'))) {
+         stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.")
+       }
+     } else if (!(is.numeric(maskmod[[i]]) || is.null(maskmod[[i]]))) {
+       stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.")
+     }
+   }
+
+   # maskobs
+   if (!is.list(maskobs)) {
+     stop("Error: parameter 'maskobs' must be a list.")
+   }
+   if (length(maskobs) < length(obs)) {
+     stop("Error: 'maskobs' must contain a numeric mask or NULL for each observational dataset in 'obs'.")
+   }
+   for (i in 1:length(maskobs)) {
+     if (is.list(maskobs[[i]])) {
+       if ((length(maskobs[[i]]) > 2) || !all(names(maskobs[[i]]) %in% c('path', 'nc_var_name'))) {
+         stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.")
+       }
+     } else if (!(is.numeric(maskobs[[i]]) || is.null(maskobs[[i]]))) {
+       stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.")
+     }
+   }
+
+   ## Force the observational masks to be the same as the experimental when
+   ## possible.
+   if ((output != 'areave' || !is.null(grid)) && length(exp) > 0) {
+     if (!all(unlist(lapply(maskobs, is.null)))) {
+       cat("! Warning: 'maskobs' will be ignored. 'maskmod[[1]]' will be applied to observations instead.\n")
+     }
+     maskobs <- lapply(maskobs, function(x) maskmod[[1]])
+   }
+
+   # configfile
+   if (is.null(configfile)) {
+     configfile <- system.file("config", "BSC.conf", package = "s2dverification")
+   } else if (!is.character(configfile) || !(nchar(configfile) > 0)) {
+     stop("Error: parameter 'configfile' must be a character string with the path to an s2dverification configuration file, if specified.")
+   }
+
+   # varmin
+   if (!is.null(varmin) && !is.numeric(varmin)) {
+     stop("Error: parameter 'varmin' must be numeric, if specified.")
+   }
+
+   # varmax
+   if (!is.null(varmax) && !is.numeric(varmax)) {
+     stop("Error: parameter 'varmax' must be numeric, if specified.")
+   }
+
+   # silent
+   if (!is.logical(silent)) {
+     stop("Error: parameter 'silent' must be TRUE or FALSE.")
+   }
+
+   # nprocs
+   if (!is.null(nprocs) && (!is.numeric(nprocs) || nprocs < 1)) {
+     stop("Error: parameter 'nprocs' must be a positive integer, if specified.")
+   }
+
+   # dimnames
+   if (!is.null(dimnames) && (!is.list(dimnames))) {
+     stop("Error: parameter 'dimnames' must be a list, if specified.")
+   }
+   if (!all(names(dimnames) %in% c('member', 'lat', 'lon'))) {
+     stop("Error: parameter 'dimnames' is wrong. There are unrecognized component names. See 'dimnames' in ?Load for details.")
+   }
+
+   # remapcells
+   if (!is.numeric(remapcells) || remapcells < 0) {
+     stop("Error: 'remapcells' must be an integer >= 0.")
+   }
+
+   # If not all the data has been provided in 'exp' and 'obs', the configuration file is read. 
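+   # (A hedged illustration, with a made-up path pattern: a dataset specified
+   # fully in-line, e.g.
+   #   exp <- list(list(name = 'expA',
+   #                    path = '/some/archive/$EXP_NAME$/$VAR_NAME$_$START_DATE$.nc'))
+   # requires no configuration look-up, so 'exps_to_fetch' stays empty for it and
+   # the branch below only resolves datasets that gave a 'name' without a 'path'.)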
+ if (length(exps_to_fetch) > 0 || length(obs_to_fetch) > 0) { + cat("* Some 'path's not explicitly provided in 'exp' and 'obs', so will now proceed to open the configuration file.\n") + data_info <- ConfigFileOpen(configfile, silent, TRUE) + + # Check that the var, exp and obs parameters are right and keep the entries + # that match for each dataset. + # Afterwards, the matching entries are applied sequentially (as specified + # in ?ConfigFileOpen) and the replace_values are applied to the result. + # Finally a path pattern for each dataset is provided. + matches <- ConfigApplyMatchingEntries(data_info, var, sapply(exp[exps_to_fetch], '[[', 'name'), + sapply(obs[obs_to_fetch], '[[', 'name'), show_entries = FALSE, show_result = FALSE) + # 'replace_values' is a named list that associates a variable name to an + # associated value. Initially it is filled with variables and values parsed + # from the configuration file, but we can add or modify some values during + # the execution to choose for example which start date we want to load. + # When '.ConfigReplaceVariablesInString' is called, all the variable accesses + # ($VARIABLE_NAME$) that appear in the string given as parameter are + # replaced by the associated value in 'replace_values'. + replace_values <- data_info$definitions + if (!is.null(exp) && length(exps_to_fetch) > 0) { + counter <- 1 + exp[exps_to_fetch] <- lapply(matches$exp_info, + function (x) { + x[names(exp[[exps_to_fetch[counter]]])] <- exp[[exps_to_fetch[counter]]] + x[['path']] <- paste0(x[['main_path']], x[['file_path']]) + counter <<- counter + 1 + x + }) + } + if (!is.null(obs) && length(obs_to_fetch) > 0) { + counter <- 1 + obs[obs_to_fetch] <- lapply(matches$obs_info, + function (x) { + x[names(obs[[obs_to_fetch[counter]]])] <- obs[[obs_to_fetch[counter]]] + x[['path']] <- paste0(x[['main_path']], x[['file_path']]) + counter <<- counter + 1 + x + }) + } + if (!silent) { + cat("* All pairs (var, exp) and (var, obs) have matching entries.\n") + } + } else { + replace_values <- list(DEFAULT_NC_VAR_NAME = '$VAR_NAME$', + DEFAULT_VAR_MIN = '', + DEFAULT_VAR_MAX = '', + DEFAULT_SUFFIX = '', + DEFAULT_DIM_NAME_LONGITUDES = 'longitude', + DEFAULT_DIM_NAME_LATITUDES = 'latitude', + DEFAULT_DIM_NAME_MEMBERS = 'ensemble') + } + # We take the dimnames that haven't been explicitly specified from the + # configuration file. + # If the configuration file wasn't opened, we take the default values from + # the dictionary 'replace_values'. + dimnames <- list(lon = ifelse(is.null(dimnames[["lon"]]), + replace_values[["DEFAULT_DIM_NAME_LONGITUDES"]], + dimnames[['lon']]), + lat = ifelse(is.null(dimnames[["lat"]]), + replace_values[["DEFAULT_DIM_NAME_LATITUDES"]], + dimnames[['lat']]), + member = ifelse(is.null(dimnames[["member"]]), + replace_values[["DEFAULT_DIM_NAME_MEMBERS"]], + dimnames[['member']])) + if (!is.null(exp)) { + exp <- lapply(exp, function (x) { + if (!('dimnames' %in% names(x))) { + x[['dimnames']] <- dimnames + x + } else { + dimnames2 <- dimnames + dimnames2[names(x[['dimnames']])] <- x[['dimnames']] + x[['dimnames']] <- dimnames2 + x + } + }) + } + if (!is.null(obs)) { + obs <- lapply(obs, function (x) { + if (!('dimnames' %in% names(x))) { + x[['dimnames']] <- dimnames + x + } else { + dimnames2 <- dimnames + dimnames2[names(x[['dimnames']])] <- x[['dimnames']] + x[['dimnames']] <- dimnames2 + x + } + }) + } + single_dataset <- (length(obs) + length(exp) == 1) + + ## We add some predefined values in the dictionary. 
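+   # (Sketch of the substitution mechanism with invented values: once
+   # replace_values[['VAR_NAME']] is set to, say, 'psl' and
+   # replace_values[['START_DATE']] to '19840101', a pattern like
+   # '/archive/$VAR_NAME$_$START_DATE$.nc' is resolved by
+   # .ConfigReplaceVariablesInString() to '/archive/psl_19840101.nc'.
+   # The path shown here is illustrative, not one of the configured ones.)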
+ replace_values[["VAR_NAME"]] <- var + replace_values[["STORE_FREQ"]] <- storefreq + + # Initialize some variables that will take various values along the + # execution + latitudes <- longitudes <- NULL + leadtimes <- NULL + var_exp <- var_obs <- NULL + units <- var_long_name <- NULL + is_2d_var <- FALSE + + # Start defining the dimensions of the output matrices + nmod <- length(exp) + nobs <- length(obs) + nsdates <- length(sdates) + + # We will iterate over all the experiments, start dates and members and will open + # the file pointed by the data in the configuration file. + # If a file is found, we will open it and read its metadata to work out the + # remaining dimensions: members, leadtimes, longitudes and latitudes. + # + # At each iteration we will build a 'work piece' that will contain information + # on the data we want to load from a file. For each file we will have one + # work piece. These work pieces will be packages of information to be sent to + # the various parallel processes. Each process will need this information to + # access and manipulate the data according to the output type and other + # parameters. + if (!silent) { + cat("* Fetching first experimental files to work out 'var_exp' size...\n") + } + + dataset_type <- 'exp' + dim_exp <- NULL + filename <- file_found <- tmp <- nltime <- NULL + dims2define <- TRUE + is_file_per_member_exp <- rep(nmod, FALSE) + exp_work_pieces <- list() + jmod <- 1 + + while (jmod <= nmod) { + tags_to_find <- c('MEMBER_NUMBER') + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][['path']], + replace_values[-position_of_tags], TRUE) + } else { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][['path']], + replace_values, TRUE) + } + is_file_per_member_exp[jmod] <- grepl('$MEMBER_NUMBER$', + quasi_final_path, fixed = TRUE) + replace_values[["EXP_NAME"]] <- exp[[jmod]][['name']] + replace_values[["NC_VAR_NAME"]] <- exp[[jmod]][['nc_var_name']] + namevar <- .ConfigReplaceVariablesInString(exp[[jmod]][['nc_var_name']], replace_values) + replace_values[["SUFFIX"]] <- exp[[jmod]][['suffix']] + if (is.null(varmin)) { + mod_var_min <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][['var_min']], replace_values)) + } else { + mod_var_min <- varmin + } + if (is.null(varmax)) { + mod_var_max <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][['var_max']], replace_values)) + } else { + mod_var_max <- varmax + } + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + replace_values[["YEAR"]] <- substr(sdates[jsdate], 1, 4) + replace_values[["MONTH"]] <- substr(sdates[jsdate], 5, 6) + replace_values[["DAY"]] <- substr(sdates[jsdate], 7, 8) + # If the dimensions of the output matrices are still to define, we try to read + # the metadata of the data file that corresponds to the current iteration + if (dims2define) { + if (is_file_per_member_exp[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- '*' + } + # We must build a work piece that will be sent to the .LoadDataFile function + # in 'explore_dims' mode. We will obtain, if success, the dimensions of the + # data in the file. 
+         work_piece <- list(dataset_type = dataset_type,
+                            filename = .ConfigReplaceVariablesInString(exp[[jmod]][['path']], replace_values),
+                            namevar = namevar, grid = grid, remap = remap, remapcells = remapcells,
+                            is_file_per_member = is_file_per_member_exp[jmod],
+                            is_file_per_dataset = FALSE,
+                            lon_limits = c(lonmin, lonmax),
+                            lat_limits = c(latmin, latmax), dimnames = exp[[jmod]][['dimnames']],
+                            output = output, # keeps the pasted body below from clobbering Load()'s 'output'
+                            single_dataset = single_dataset)
+         found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, silent = silent)
+
+         # From here on, the body of .LoadDataFile is pasted in-line so it can be
+         # inspected step by step; 'explore_dims' mirrors the argument of the call above.
+         explore_dims <- TRUE
+
+         # The purpose, working modes, inputs and outputs of this function are
+         # explained in ?LoadDataFile
+         #suppressPackageStartupMessages({library(ncdf4)})
+         #suppressPackageStartupMessages({library(bigmemory)})
+         #suppressPackageStartupMessages({library(plyr)})
+         # Auxiliary function to convert array indices to linear (column-major)
+         # indices, e.g. for dims = c(3, 4, 5) the array index c(2, 3, 4) maps to
+         # 2 + (3 - 1) * 3 + (4 - 1) * 3 * 4 = 44, matching R's native storage order.
+         arrayIndex2VectorIndex <- function(indices, dims) {
+           if (length(indices) > length(dims)) {
+             stop("Error: indices do not match dimensions in arrayIndex2VectorIndex.")
+           }
+           position <- 1
+           dims <- rev(dims)
+           indices <- rev(indices)
+           for (i in 1:length(indices)) {
+             position <- position + (indices[i] - 1) * prod(dims[-c(1:i)])
+           }
+           position
+         }
+
+         # Number of latitudes/longitudes of the gaussian grid with triangular
+         # truncation 't', e.g. .t2nlatlon(106) yields c(160, 320).
+         .t2nlatlon <- function(t) {
+           ## As seen in cdo's griddes.c: ntr2nlat()
+           nlats <- (t * 3 + 1) / 2
+           if ((nlats > 0) && (nlats - trunc(nlats) >= 0.5)) {
+             nlats <- ceiling(nlats)
+           } else {
+             nlats <- round(nlats)
+           }
+           if (nlats %% 2 > 0) {
+             nlats <- nlats + 1
+           }
+           ## As seen in cdo's griddes.c: compNlon(), and as specified in ECMWF
+           nlons <- 2 * nlats
+           keep_going <- TRUE
+           while (keep_going) {
+             n <- nlons
+             if (n %% 8 == 0) n <- trunc(n / 8)
+             while (n %% 6 == 0) n <- trunc(n / 6)
+             while (n %% 5 == 0) n <- trunc(n / 5)
+             while (n %% 4 == 0) n <- trunc(n / 4)
+             while (n %% 3 == 0) n <- trunc(n / 3)
+             if (n %% 2 == 0) n <- trunc(n / 2)
+             if (n <= 8) {
+               keep_going <- FALSE
+             } else {
+               nlons <- nlons + 2
+               if (nlons > 9999) {
+                 stop("Error: pick another gaussian grid truncation. It doesn't fulfill the standards to apply FFT.")
+               }
+             }
+           }
+           c(nlats, nlons)
+         }
+
+         # Inverse of .t2nlatlon for the latitude count, e.g. .nlat2t(160) == 106.
+         .nlat2t <- function(nlats) {
+           trunc((nlats * 2 - 1) / 3)
+         }
+
+         found_file <- NULL
+         dims <- NULL
+         grid_name <- units <- var_long_name <- is_2d_var <- NULL
+
+         filename <- work_piece[['filename']]
+         namevar <- work_piece[['namevar']]
+         output <- work_piece[['output']]
+         # The names of all data files in the directory of the repository that match
+         # the pattern are obtained.
+         if (length(grep("^http", filename)) > 0) {
+           is_url <- TRUE
+           files <- filename
+           ## TODO: Check that the user is not using shell globbing exps.
+         } else {
+           is_url <- FALSE
+           files <- Sys.glob(filename)
+         }
+         # If we don't find any, we leave the flag 'found_file' with a NULL value.
+         if (length(files) > 0) {
+           # The last file that matches the pattern is chosen and read.
+           filename <- files[length(files)]
+           filein <- filename
+           found_file <- filename
+           mask <- work_piece[['mask']]
+
+           if (!silent) {
+             if (explore_dims) {
+               cat(paste("* Exploring dimensions...", filename, '\n'))
+             }
+             ##} else {
+             ##  cat(paste("* Reading & processing data...", filename, '\n'))
+             ##}
+           }
+
+           # We will fill in 'expected_dims' with the names of the expected dimensions of
+           # the data array we'll retrieve from the file. 
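+           # (E.g., for a 2-d variable stored with dimensions (longitude, latitude,
+           # ensemble, time), 'expected_dims' typically ends up as
+           # c('longitude', 'latitude', 'ensemble'), and the one remaining
+           # dimension is taken as the time dimension further below. The exact
+           # names depend on the 'dimnames' of the dataset.)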
+ expected_dims <- NULL + remap_needed <- FALSE + # But first we open the file and work out whether the requested variable is 2d + fnc <- nc_open(filein) + if (!(namevar %in% names(fnc$var))) { + stop(paste("Error: The variable", namevar, "is not defined in the file", filename)) + } + var_long_name <- fnc$var[[namevar]]$longname + units <- fnc$var[[namevar]]$units + if (is.null(work_piece[['is_2d_var']])) { + is_2d_var <- all(c(work_piece[['dimnames']][['lon']], + work_piece[['dimnames']][['lat']]) %in% + unlist(lapply(fnc$var[[namevar]][['dim']], + '[[', 'name'))) + } else { + is_2d_var <- work_piece[['is_2d_var']] + } + if ((is_2d_var || work_piece[['is_file_per_dataset']]) && (Sys.which("cdo")[[1]] == "")) { + stop("Error: CDO libraries not available") + } + # If the variable to load is 2-d, we need to determine whether: + # - interpolation is needed + # - subsetting is requested + if (is_2d_var) { + ## We read the longitudes and latitudes from the file. + lon <- ncvar_get(fnc, work_piece[['dimnames']][['lon']]) + lat <- ncvar_get(fnc, work_piece[['dimnames']][['lat']]) + # If a common grid is requested or we are exploring the file dimensions + # we need to read the grid type and size of the file to finally work out the + # CDO grid name. + if (!is.null(work_piece[['grid']]) || explore_dims) { + # Here we read the grid type and its number of longitudes and latitudes + file_info <- system(paste('cdo -s griddes', filein, '2> /dev/null'), intern = TRUE) + grids_positions <- grep('# gridID', file_info) + grids_first_lines <- grids_positions + 2 + grids_last_lines <- c((grids_positions - 2)[-1], length(file_info)) + grids_info <- as.list(1:length(grids_positions)) + grids_info <- lapply(grids_info, function (x) file_info[grids_first_lines[x]:grids_last_lines[x]]) + grids_info <- lapply(grids_info, function (x) gsub(" *", " ", x)) + grids_info <- lapply(grids_info, function (x) gsub("^ | $", "", x)) + grids_info <- lapply(grids_info, function (x) unlist(strsplit(x, " | = "))) + grids_types <- unlist(lapply(grids_info, function (x) x[grep('gridtype', x) + 1])) + grids_matches <- unlist(lapply(grids_info, function (x) { + nlons <- if (length(grep('xsize', x)) > 0) { + as.integer(x[grep('xsize', x) + 1]) + } else { + NA + } + nlats <- if (length(grep('ysize', x)) > 0) { + as.integer(x[grep('ysize', x) + 1]) + } else { + NA + } + if (identical(nlons, length(lon)) && + identical(nlats, length(lat))) { + TRUE + } else { + FALSE + } + })) + grids_matches <- grids_matches[which(grids_types %in% c('gaussian', 'lonlat'))] + grids_info <- grids_info[which(grids_types %in% c('gaussian', 'lonlat'))] + grids_types <- grids_types[which(grids_types %in% c('gaussian', 'lonlat'))] + if (length(grids_matches) == 0) { + stop("Error: Only 'gaussian' and 'lonlat' grids supported. 
See e.g.: cdo sinfo ", filename)
+             }
+             if (sum(grids_matches) > 1) {
+               if ((all(grids_types[which(grids_matches)] == 'gaussian') ||
+                    all(grids_types[which(grids_matches)] == 'lonlat')) &&
+                   all(unlist(lapply(grids_info[which(grids_matches)], identical,
+                                     grids_info[which(grids_matches)][[1]])))) {
+                 grid_type <- grids_types[which(grids_matches)][1]
+               } else {
+                 stop("Error: Load() can't disambiguate: more than one lonlat/gaussian grid with the same size as the requested variable defined in ", filename)
+               }
+             } else {
+               grid_type <- grids_types[which(grids_matches)]
+             }
+             grid_lons <- length(lon)
+             grid_lats <- length(lat)
+             # Convert to CDO grid name as seen in cdo's griddes.c: nlat2ntr()
+             if (grid_type == 'lonlat') {
+               grid_name <- paste0('r', grid_lons, 'x', grid_lats)
+             } else {
+               grid_name <- paste0('t', .nlat2t(grid_lats), 'grid')
+             }
+           }
+           # If a common grid is requested, we will also calculate its size which we will use
+           # later on.
+           if (!is.null(work_piece[['grid']])) {
+             # Now we calculate the common grid type and its lons and lats
+             if (length(grep('^t\\d{1,}grid$', work_piece[['grid']])) > 0) {
+               common_grid_type <- 'gaussian'
+               common_grid_res <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,}')[[1]][2])
+               nlonlat <- .t2nlatlon(common_grid_res)
+               common_grid_lats <- nlonlat[1]
+               common_grid_lons <- nlonlat[2]
+             } else if (length(grep('^r\\d{1,}x\\d{1,}$', work_piece[['grid']])) > 0) {
+               common_grid_type <- 'lonlat'
+               common_grid_lons <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,}')[[1]][2])
+               common_grid_lats <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,}')[[1]][3])
+             } else {
+               stop("Error: Only supported grid types in parameter 'grid' are t<RES>grid and r<NX>x<NY>")
+             }
+           } else {
+             ## If no 'grid' is specified, there is no common grid.
+             ## But these variables are filled in for consistency in the code.
+             common_grid_lons <- length(lon)
+             common_grid_lats <- length(lat)
+           }
+           first_common_grid_lon <- 0
+           last_common_grid_lon <- 360 - 360/common_grid_lons
+           ## This is not true for gaussian grids or for some regular grids, but
+           ## is a safe estimation
+           first_common_grid_lat <- -90
+           last_common_grid_lat <- 90
+           # And finally determine whether interpolation is needed or not
+           remove_shift <- FALSE
+           if (!is.null(work_piece[['grid']])) {
+             if ((grid_lons != common_grid_lons) ||
+                 (grid_lats != common_grid_lats) ||
+                 (grid_type != common_grid_type) ||
+                 ((lon[1] != first_common_grid_lon)
+                  && !work_piece[['single_dataset']])) {
+               if (grid_lons == common_grid_lons && grid_lats == common_grid_lats &&
+                   grid_type == common_grid_type && lon[1] != first_common_grid_lon &&
+                   !work_piece[['single_dataset']]) {
+                 remove_shift <- TRUE
+               }
+               remap_needed <- TRUE
+               common_grid_name <- work_piece[['grid']]
+             }
+           } else if ((lon[1] != first_common_grid_lon) && explore_dims &&
+                      !work_piece[['single_dataset']]) {
+             remap_needed <- TRUE
+             common_grid_name <- grid_name
+             remove_shift <- TRUE
+           }
+           if (remove_shift && !explore_dims) {
+             if (!is.null(work_piece[['progress_amount']])) {
+               cat("\n")
+             }
+             cat(paste0("! Warning: The dataset with index ",
+                        tail(work_piece[['indices']], 1), " in '",
+                        work_piece[['dataset_type']], "' doesn't start at longitude 0 and will be re-interpolated in order to align its longitudes with the standard CDO grids definable with the names 't<RES>grid' or 'r<NX>x<NY>', which by definition start at longitude 0.\n"))
+             if (!is.null(mask)) {
+               cat(paste0("! 
Warning: A mask was provided for the dataset with index ", + tail(work_piece[['indices']], 1), " in '", + work_piece[['dataset_type']], "'. This dataset has been re-interpolated to align its longitudes to start at 0. You must re-interpolate the corresponding mask to align its longitudes to start at 0 as well, if you haven't done so yet. Running cdo remapcon,", common_grid_name, " original_mask_file.nc new_mask_file.nc will fix it.\n")) + } + } + # Now calculate if the user requests for a lonlat subset or for the + # entire field + lonmin <- work_piece[['lon_limits']][1] + lonmax <- work_piece[['lon_limits']][2] + latmin <- work_piece[['lat_limits']][1] + latmax <- work_piece[['lat_limits']][2] + lonlat_subsetting_requested <- FALSE + if (lonmin <= lonmax) { + if ((lonmin > first_common_grid_lon) || (lonmax < last_common_grid_lon)) { + lonlat_subsetting_requested <- TRUE + } + } else { + if ((lonmin - lonmax) > 360/common_grid_lons) { + lonlat_subsetting_requested <- TRUE + } else { + gap_width <- floor(lonmin / (360/common_grid_lons)) - + floor(lonmax / (360/common_grid_lons)) + if (gap_width > 0) { + if (!(gap_width == 1 && (lonmin %% (360/common_grid_lons) == 0) && + (lonmax %% (360/common_grid_lons) == 0))) { + lonlat_subsetting_requested <- TRUE + } + } + } + } + if ((latmin > first_common_grid_lat) || (latmax < last_common_grid_lat)) { + lonlat_subsetting_requested <- TRUE + } + test="10" + + # When remap is needed but no subsetting, the file is copied locally + # so that cdo works faster, and then interpolated. + # Otherwise the file is kept as is and the subset will have to be + # interpolated still. + if (!lonlat_subsetting_requested && remap_needed) { + nc_close(fnc) + filecopy <- tempfile(pattern = "load", fileext = ".nc") + file.copy(filein, filecopy) + filein <- tempfile(pattern = "loadRegridded", fileext = ".nc") + system(paste0("cdo -s ", work_piece[['remap']], ",", + common_grid_name, + " -selname,", namevar, " ", filecopy, " ", filein, + " 2>/dev/null", sep = "")) + file.remove(filecopy) + work_piece[['dimnames']][['lon']] <- 'lon' + work_piece[['dimnames']][['lat']] <- 'lat' + fnc <- nc_open(filein) + lon <- ncvar_get(fnc, work_piece[['dimnames']][['lon']]) + lat <- ncvar_get(fnc, work_piece[['dimnames']][['lat']]) + } + + # Read and check also the mask + if (!is.null(mask)) { + ###mask_file <- tempfile(pattern = 'loadMask', fileext = '.nc') + if (is.list(mask)) { + if (!file.exists(mask[['path']])) { + stop(paste("Error: Couldn't find the mask file", mask[['path']])) + } + mask_file <- mask[['path']] + ###file.copy(work_piece[['mask']][['path']], mask_file) + fnc_mask <- nc_open(mask_file) + vars_in_mask <- sapply(fnc_mask$var, '[[', 'name') + if ('nc_var_name' %in% names(mask)) { + if (!(mask[['nc_var_name']] %in% + vars_in_mask)) { + stop(paste("Error: couldn't find variable", mask[['nc_var_name']], + "in the mask file", mask[['path']])) + } + } else { + if (length(vars_in_mask) != 1) { + stop(paste("Error: one and only one non-coordinate variable should be defined in the mask file", + mask[['path']], "if the component 'nc_var_name' is not specified. 
Currently found: ", + paste(vars_in_mask, collapse = ', '), ".")) + } else { + mask[['nc_var_name']] <- vars_in_mask + } + } + if (sum(fnc_mask$var[[mask[['nc_var_name']]]]$size > 1) != 2) { + stop(paste0("Error: the variable '", + mask[['nc_var_name']], + "' must be defined only over the dimensions '", + work_piece[['dimnames']][['lon']], "' and '", + work_piece[['dimnames']][['lat']], + "' in the mask file ", + mask[['path']])) + } + mask <- ncvar_get(fnc_mask, mask[['nc_var_name']], collapse_degen = TRUE) + nc_close(fnc_mask) + ### mask_lon <- ncvar_get(fnc_mask, work_piece[['dimnames']][['lon']]) + ### mask_lat <- ncvar_get(fnc_mask, work_piece[['dimnames']][['lat']]) + ###} else { + ### dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", lon) + ### dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", lat) + ### ncdf_var <- ncvar_def('LSM', "", list(dim_longitudes, dim_latitudes), NA, 'double') + ### fnc_mask <- nc_create(mask_file, list(ncdf_var)) + ### ncvar_put(fnc_mask, ncdf_var, work_piece[['mask']]) + ### nc_close(fnc_mask) + ### fnc_mask <- nc_open(mask_file) + ### work_piece[['mask']] <- list(path = mask_file, nc_var_name = 'LSM') + ### mask_lon <- lon + ### mask_lat <- lat + ###} + ###} + ### Now ready to check that the mask is right + ##if (!(lonlat_subsetting_requested && remap_needed)) { + ### if ((dim(mask)[2] != length(lon)) || (dim(mask)[1] != length(lat))) { + ### stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + ### } + ###if (!(identical(mask_lon, lon) && identical(mask_lat, lat))) { + ### stop(paste0("Error: the longitudes and latitudes in the masks must be identical to the ones in the corresponding data files if output = 'areave' or, if the selected output is 'lon', 'lat' or 'lonlat', the longitudes in the mask file must start by 0 and the latitudes must be ordered from highest to lowest. See\n ", + ### work_piece[['mask']][['path']], " and ", filein)) + ###} + } + } +test='20' + lon_indices <- 1:length(lon) + if (!(lonlat_subsetting_requested && remap_needed)) { + lon[which(lon < 0)] <- lon[which(lon < 0)] + 360 + } + if (lonmax >= lonmin) { + lon_indices <- lon_indices[which(((lon %% 360) >= lonmin) & ((lon %% 360) <= lonmax))] + } else if (!remap_needed) { + lon_indices <- lon_indices[which(((lon %% 360) <= lonmax) | ((lon %% 360) >= lonmin))] + } + lat_indices <- which(lat >= latmin & lat <= latmax) + ## In most of the cases the latitudes are ordered from -90 to 90. + ## We will reorder them to be in the order from 90 to -90, so mostly + ## always the latitudes are reordered. + ## TODO: This could be avoided in future. + if (lat[1] < lat[length(lat)]) { + lat_indices <- lat_indices[length(lat_indices):1] + } + if (!is.null(mask) && !(lonlat_subsetting_requested && remap_needed)) { + if ((dim(mask)[1] != length(lon)) || (dim(mask)[2] != length(lat))) { + stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. 
It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + } + mask <- mask[lon_indices, lat_indices] + } + ## If the user requests subsetting, we must extend the lon and lat limits if possible + ## so that the interpolation after is done properly + maximum_extra_points <- work_piece[['remapcells']] + if (lonlat_subsetting_requested && remap_needed) { + if ((maximum_extra_points > (head(lon_indices, 1) - 1)) || + (maximum_extra_points > (length(lon) - tail(lon_indices, 1)))) { + ## if the requested number of points goes beyond the left or right + ## sides of the map, we need to take the entire map so that the + ## interpolation works properly + lon_indices <- 1:length(lon) + } else { + extra_points <- min(maximum_extra_points, head(lon_indices, 1) - 1) + if (extra_points > 0) { + lon_indices <- c((head(lon_indices, 1) - extra_points):(head(lon_indices, 1) - 1), lon_indices) + } + extra_points <- min(maximum_extra_points, length(lon) - tail(lon_indices, 1)) + if (extra_points > 0) { + lon_indices <- c(lon_indices, (tail(lon_indices, 1) + 1):(tail(lon_indices, 1) + extra_points)) + } + } + min_lat_ind <- min(lat_indices) + max_lat_ind <- max(lat_indices) + extra_points <- min(maximum_extra_points, min_lat_ind - 1) + if (extra_points > 0) { + if (lat[1] < tail(lat, 1)) { + lat_indices <- c(lat_indices, (min_lat_ind - 1):(min_lat_ind - extra_points)) + } else { + lat_indices <- c((min_lat_ind - extra_points):(min_lat_ind - 1), lat_indices) + } + } + extra_points <- min(maximum_extra_points, length(lat) - max_lat_ind) + if (extra_points > 0) { + if (lat[1] < tail(lat, 1)) { + lat_indices <- c((max_lat_ind + extra_points):(max_lat_ind + 1), lat_indices) + } else { + lat_indices <- c(lat_indices, (max_lat_ind + 1):(max_lat_ind + extra_points)) + } + } + } + lon <- lon[lon_indices] + lat <- lat[lat_indices] + expected_dims <- c(work_piece[['dimnames']][['lon']], + work_piece[['dimnames']][['lat']]) + } else { + lon <- 0 + lat <- 0 + } + # We keep on filling the expected dimensions + var_dimnames <- unlist(lapply(fnc$var[[namevar]][['dim']], '[[', 'name')) + nmemb <- nltime <- NULL + ## Sometimes CDO renames 'members' dimension to 'lev' + old_members_dimname <- NULL + if (('lev' %in% var_dimnames) && !(work_piece[['dimnames']][['member']] %in% var_dimnames)) { + old_members_dimname <- work_piece[['dimnames']][['member']] + work_piece[['dimnames']][['member']] <- 'lev' + } + if (work_piece[['dimnames']][['member']] %in% var_dimnames) { + nmemb <- fnc$var[[namevar]][['dim']][[match(work_piece[['dimnames']][['member']], var_dimnames)]]$len + expected_dims <- c(expected_dims, work_piece[['dimnames']][['member']]) + } else { + nmemb <- 1 + } + if (length(expected_dims) > 0) { + dim_matches <- match(expected_dims, var_dimnames) + if (any(is.na(dim_matches))) { + if (!is.null(old_members_dimname)) { + expected_dims[which(expected_dims == 'lev')] <- old_members_dimname + } + stop(paste("Error: the expected dimension(s)", + paste(expected_dims[which(is.na(dim_matches))], collapse = ', '), + "were not found in", filename)) + } + time_dimname <- var_dimnames[-dim_matches] + } else { + time_dimname <- var_dimnames + } + if (length(time_dimname) > 0) { + if (length(time_dimname) == 1) { + nltime <- 
fnc$var[[namevar]][['dim']][[match(time_dimname, var_dimnames)]]$len + expected_dims <- c(expected_dims, time_dimname) + dim_matches <- match(expected_dims, var_dimnames) + } else { + if (!is.null(old_members_dimname)) { + expected_dims[which(expected_dims == 'lev')] <- old_members_dimname + } + stop(paste("Error: the variable", namevar, + "is defined over more dimensions than the expected (", + paste(c(expected_dims, 'time'), collapse = ', '), + "). It could also be that the members dimension in 'dimnames' or in the configuration file is incorrect. If not, it could also be that the members dimension is named incorrectly. In that case, either rename the dimension in the file or adjust Load() to recognize this name with the parameter 'dimnames'. See file", filename)) + } + } else { + nltime <- 1 + } +test='30' + # Now we must retrieve the data from the file, but only the asked indices. + # So we build up the indices to retrieve. + # Longitudes or latitudes have been retrieved already. + if (explore_dims) { + # If we're exploring the file we only want one time step from one member, + # to regrid it and work out the number of longitudes and latitudes. + # We don't need more. + members <- 1 + ltimes_list <- list(c(1)) + } else { + # The data is arranged in the array 'tmp' with the dimensions in a + # common order: + # 1) Longitudes + # 2) Latitudes + # 3) Members (even if is not a file per member experiment) + # 4) Lead-times + if (work_piece[['is_file_per_dataset']]) { + time_indices <- 1:nltime + mons <- strsplit(system(paste('cdo showmon ', filein, + ' 2>/dev/null'), intern = TRUE), split = ' ') + years <- strsplit(system(paste('cdo showyear ', filein, + ' 2>/dev/null'), intern = TRUE), split = ' ') + mons <- as.integer(mons[[1]][which(mons[[1]] != "")]) + years <- as.integer(years[[1]][which(years[[1]] != "")]) + time_indices <- ts(time_indices, start = c(years[1], mons[1]), + end = c(years[length(years)], mons[length(mons)]), + frequency = 12) + ltimes_list <- list() + for (sdate in work_piece[['startdates']]) { + selected_time_indices <- window(time_indices, start = c(as.integer( + substr(sdate, 1, 4)), as.integer(substr(sdate, 5, 6))), + end = c(3000, 12), frequency = 12, extend = TRUE) + selected_time_indices <- selected_time_indices[work_piece[['leadtimes']]] + ltimes_list <- c(ltimes_list, list(selected_time_indices)) + } + } else { + ltimes <- work_piece[['leadtimes']] + #if (work_piece[['dataset_type']] == 'exp') { + ltimes_list <- list(ltimes[which(ltimes <= nltime)]) + #} + } + ## TODO: Put, when reading matrices, this kind of warnings + # if (nmember < nmemb) { + # cat("Warning: + members <- 1:work_piece[['nmember']] + members <- members[which(members <= nmemb)] + } +test='35' + # Now, for each list of leadtimes to load (usually only one list with all leadtimes), + # we'll join the indices and retrieve data + found_disordered_dims <- FALSE + for (ltimes in ltimes_list) { + if (is_2d_var) { + start <- c(min(lon_indices), min(lat_indices)) + end <- c(max(lon_indices), max(lat_indices)) + if (lonlat_subsetting_requested && remap_needed) { + subset_indices <- list(min(lon_indices):max(lon_indices) - min(lon_indices) + 1, + lat_indices - min(lat_indices) + 1) + + + dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", lon) + dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", lat) + ncdf_dims <- list(dim_longitudes, dim_latitudes) + } else { + subset_indices <- list(lon_indices - min(lon_indices) + 1, + lat_indices - 
min(lat_indices) + 1)
+
+                   ncdf_dims <- list()
+                 }
+                 final_dims <- c(length(subset_indices[[1]]), length(subset_indices[[2]]), 1, 1)
+               } else {
+                 start <- end <- c()
+                 subset_indices <- list()
+                 ncdf_dims <- list()
+                 final_dims <- c(1, 1, 1, 1)
+               }
+               if (work_piece[['dimnames']][['member']] %in% expected_dims) {
+                 start <- c(start, head(members, 1))
+                 end <- c(end, tail(members, 1))
+                 subset_indices <- c(subset_indices, list(members - head(members, 1) + 1))
+                 dim_members <- ncdim_def(work_piece[['dimnames']][['member']], "", members)
+                 ncdf_dims <- c(ncdf_dims, list(dim_members))
+                 final_dims[3] <- length(members)
+               }
+               if (time_dimname %in% expected_dims) {
+                 if (any(!is.na(ltimes))) {
+                   start <- c(start, head(ltimes[which(!is.na(ltimes))], 1))
+                   end <- c(end, tail(ltimes[which(!is.na(ltimes))], 1))
+                   subset_indices <- c(subset_indices, list(ltimes - head(ltimes[which(!is.na(ltimes))], 1) + 1))
+                 } else {
+                   start <- c(start, NA)
+                   end <- c(end, NA)
+                   subset_indices <- c(subset_indices, list(ltimes))
+                 }
+                 dim_time <- ncdim_def(time_dimname, "", 1:length(ltimes), unlim = TRUE)
+                 ncdf_dims <- c(ncdf_dims, list(dim_time))
+                 final_dims[4] <- length(ltimes)
+               }
+               count <- end - start + 1
+               start <- start[dim_matches]
+               count <- count[dim_matches]
+
+               subset_indices <- subset_indices[dim_matches]
+
+               # Now that we have the indices to retrieve, we retrieve the data
+               if (prod(final_dims) > 0) {
+                 tmp <- take(ncvar_get(fnc, namevar, start, count,
+                                       collapse_degen = FALSE),
+                             1:length(subset_indices), subset_indices)
+
+                 # The data is regridded if it corresponds to an atmospheric variable. When
+                 # the chosen output type is 'areave' the data is not regridded, so as not
+                 # to waste computing time, unless the user specified a common grid.
+                 if (is_2d_var) {
+                   ###if (!is.null(work_piece[['mask']]) && !(lonlat_subsetting_requested && remap_needed)) {
+                   ###  mask <- take(ncvar_get(fnc_mask, work_piece[['mask']][['nc_var_name']],
+                   ###                         start[dim_matches[1:2]], count[dim_matches[1:2]],
+                   ###                         collapse_degen = FALSE), 1:2, subset_indices[dim_matches[1:2]])
+                   ###}
+                   if (lonlat_subsetting_requested && remap_needed) {
+                     filein <- tempfile(pattern = "loadRegridded", fileext = ".nc")
+                     filein2 <- tempfile(pattern = "loadRegridded2", fileext = ".nc")
+                     ncdf_var <- ncvar_def(namevar, "", ncdf_dims[dim_matches],
+                                           fnc$var[[namevar]]$missval,
+                                           prec = if (fnc$var[[namevar]]$prec == 'int') {
+                                             'integer'
+                                           } else {
+                                             fnc$var[[namevar]]$prec
+                                           })
+                     nc_close(fnc)
+                     fnc <- nc_create(filein2, list(ncdf_var))
+                     ncvar_put(fnc, ncdf_var, tmp)
+                     nc_close(fnc)
+                     system(paste0("cdo -s -sellonlatbox,", if (lonmin > lonmax) {
+                       "0,360,"
+                     } else {
+                       paste0(lonmin, ",", lonmax, ",")
+                     }, latmin, ",", latmax,
+                     " -", work_piece[['remap']], ",", common_grid_name,
+                     " ", filein2, " ", filein, " 2>/dev/null"))
+                     file.remove(filein2)
+                     fnc <- nc_open(filein)
+                     lon <- ncvar_get(fnc, 'lon')
+                     lat <- ncvar_get(fnc, 'lat')
+
+                     ## We read the longitudes and latitudes from the file. 
+ ## In principle cdo should put in order the longitudes + ## and slice them properly unless data is across greenwich + lon[which(lon < 0)] <- lon[which(lon < 0)] + 360 + lon_indices <- 1:length(lon) + if (lonmax < lonmin) { + lon_indices <- lon_indices[which((lon <= lonmax) | (lon >= lonmin))] + } + lat_indices <- 1:length(lat) + ## In principle cdo should put in order the latitudes + if (lat[1] < lat[length(lat)]) { + lat_indices <- length(lat):1 + } + final_dims[c(1, 2)] <- c(length(lon_indices), length(lat_indices)) + subset_indices[[dim_matches[1]]] <- lon_indices + subset_indices[[dim_matches[2]]] <- lat_indices + + tmp <- take(ncvar_get(fnc, namevar, collapse_degen = FALSE), + 1:length(subset_indices), subset_indices) +test='38' + if (!is.null(mask)) { + ## We create a very simple 2d netcdf file that is then interpolated to the common + ## grid to know what are the lons and lats of our slice of data + mask_file <- tempfile(pattern = 'loadMask', fileext = '.nc') + mask_file_remap <- tempfile(pattern = 'loadMask', fileext = '.nc') + dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", c(0, 360)) + dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", c(-90, 90)) + ncdf_var <- ncvar_def('LSM', "", list(dim_longitudes, dim_latitudes), NA, 'double') + fnc_mask <- nc_create(mask_file, list(ncdf_var)) + ncvar_put(fnc_mask, ncdf_var, array(rep(0, 4), dim = c(2, 2))) + nc_close(fnc_mask) + system(paste0("cdo -s ", work_piece[['remap']], ",", common_grid_name, + " ", mask_file, " ", mask_file_remap, " 2>/dev/null", sep = "")) + fnc_mask <- nc_open(mask_file_remap) + mask_lons <- ncvar_get(fnc_mask, 'lon') + mask_lats <- ncvar_get(fnc_mask, 'lat') + nc_close(fnc_mask) + file.remove(mask_file, mask_file_remap) + if ((dim(mask)[1] != common_grid_lons) || (dim(mask)[2] != common_grid_lats)) { + stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. 
For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = ""))
+                       }
+                       mask_lons[which(mask_lons < 0)] <- mask_lons[which(mask_lons < 0)] + 360
+                       if (lonmax >= lonmin) {
+                         mask_lon_indices <- which((mask_lons >= lonmin) & (mask_lons <= lonmax))
+                       } else {
+                         mask_lon_indices <- which((mask_lons >= lonmin) | (mask_lons <= lonmax))
+                       }
+                       mask_lat_indices <- which((mask_lats >= latmin) & (mask_lats <= latmax))
+                       if (lat[1] < lat[length(lat)]) {
+                         mask_lat_indices <- mask_lat_indices[length(mask_lat_indices):1]
+                       }
+                       mask <- mask[mask_lon_indices, mask_lat_indices]
+                     }
+                     lon <- lon[lon_indices]
+                     lat <- lat[lat_indices]
+                     ### nc_close(fnc_mask)
+                     ### system(paste0("cdo -s -sellonlatbox,", if (lonmin > lonmax) {
+                     ###   "0,360,"
+                     ### } else {
+                     ###   paste0(lonmin, ",", lonmax, ",")
+                     ### }, latmin, ",", latmax,
+                     ### " -", work_piece[['remap']], ",", common_grid_name,
+                     ###This is wrong: same files
+                     ### " ", mask_file, " ", mask_file, " 2>/dev/null", sep = ""))
+                     ### fnc_mask <- nc_open(mask_file)
+                     ### mask <- take(ncvar_get(fnc_mask, work_piece[['mask']][['nc_var_name']],
+                     ###                        collapse_degen = FALSE), 1:2, subset_indices[dim_matches[1:2]])
+                     ###}
+                   }
+                 }
+                 if (!all(dim_matches == sort(dim_matches))) {
+                   if (!found_disordered_dims && rev(work_piece[['indices']])[2] == 1 && rev(work_piece[['indices']])[3] == 1) {
+                     found_disordered_dims <- TRUE
+                     cat(paste0("! Warning: the dimensions for the variable ", namevar, " in the files of the experiment with index ", tail(work_piece[['indices']], 1), " are not in the optimal order for loading with Load(). The optimal order would be '", paste(expected_dims, collapse = ', '), "'. One of the files of the dataset is stored in ", filename))
+                   }
+                   tmp <- aperm(tmp, dim_matches)
+                 }
+                 dim(tmp) <- final_dims
+                 # If we are exploring the file we don't need to process and arrange
+                 # the retrieved data. We only need to keep the dimension sizes.
+                 if (explore_dims) {
+                   if (work_piece[['is_file_per_member']]) {
+                     ## TODO: When the exp_full_path contains asterisks and is file_per_member
+                     ## members from different datasets may be accounted.
+                     ## Also if one file member is missing the accounting will be wrong.
+                     ## Should parse the file name and extract number of members.
+                     if (is_url) {
+                       nmemb <- NULL
+                     } else {
+                       nmemb <- length(files)
+                     }
+                   }
+                   dims <- list(member = nmemb, time = nltime, lon = lon, lat = lat)
+                 } else {
+                   # If we are not exploring, then we have to process the retrieved data
+                   if (is_2d_var) {
+                     tmp <- apply(tmp, c(3, 4), function(x) {
+                       # Discard values outside the requested variable limits. 
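+                       # (E.g., with var_limits = c(0, 400) any grid cell outside
+                       # [0, 400] becomes NA before averaging; the bounds shown
+                       # are illustrative.)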
+                       if (!is.na(work_piece[['var_limits']][2])) {
+                         x[which(x > work_piece[['var_limits']][2])] <- NA
+                       }
+                       if (!is.na(work_piece[['var_limits']][1])) {
+                         x[which(x < work_piece[['var_limits']][1])] <- NA
+                       }
+                       if (!is.null(mask)) {
+                         x[which(mask < 0.5)] <- NA
+                       }
+
+                       if (output == 'areave' || output == 'lon') {
+                         weights <- InsertDim(cos(lat * pi / 180), 1, length(lon))
+                         weights[which(is.na(x))] <- NA
+                         if (output == 'areave') {
+                           weights <- weights / mean(weights, na.rm = TRUE)
+                           mean(x * weights, na.rm = TRUE)
+                         } else {
+                           weights <- weights / InsertDim(Mean1Dim(weights, 2, narm = TRUE), 2, length(lat))
+                           Mean1Dim(x * weights, 2, narm = TRUE)
+                         }
+                       } else if (output == 'lat') {
+                         Mean1Dim(x, 1, narm = TRUE)
+                       } else if (output == 'lonlat') {
+                         signif(x, 5)
+                       }
+                     })
+                     if (output == 'areave') {
+                       dim(tmp) <- c(1, 1, final_dims[3:4])
+                     } else if (output == 'lon') {
+                       dim(tmp) <- c(final_dims[1], 1, final_dims[3:4])
+                     } else if (output == 'lat') {
+                       dim(tmp) <- c(1, final_dims[c(2, 3, 4)])
+                     } else if (output == 'lonlat') {
+                       dim(tmp) <- final_dims
+                     }
+                   }
+                   var_data <- attach.big.matrix(work_piece[['out_pointer']])
+                   if (work_piece[['dims']][['member']] > 1 && nmemb > 1 &&
+                       work_piece[['dims']][['time']] > 1 &&
+                       nltime < work_piece[['dims']][['time']]) {
+                     work_piece[['indices']][2] <- work_piece[['indices']][2] - 1
+                     for (jmemb in members) {
+                       work_piece[['indices']][2] <- work_piece[['indices']][2] + 1
+                       out_position <- arrayIndex2VectorIndex(work_piece[['indices']], work_piece[['dims']])
+                       out_indices <- out_position:(out_position + length(tmp[, , jmemb, ]) - 1)
+                       var_data[out_indices] <- as.vector(tmp[, , jmemb, ])
+                     }
+                     work_piece[['indices']][2] <- work_piece[['indices']][2] - tail(members, 1) + 1
+                   } else {
+                     out_position <- arrayIndex2VectorIndex(work_piece[['indices']], work_piece[['dims']])
+                     out_indices <- out_position:(out_position + length(tmp) - 1)
+                     var_data[out_indices] <- as.vector(aperm(tmp, c(1, 2, 4, 3)))
+                   }
+                   work_piece[['indices']][3] <- work_piece[['indices']][3] + 1
+                 }
+               }
+             }
+             nc_close(fnc)
+             if (is_2d_var && remap_needed) {
+               file.remove(filein)
+               ###if (!is.null(mask) && lonlat_subsetting_requested) {
+               ###  file.remove(mask_file)
+               ###}
+             }
+           }
+           if (explore_dims) {
+             found_data <- list(dims = dims, is_2d_var = is_2d_var, grid = grid_name, units = units,
+                                var_long_name = var_long_name)
+           } else {
+             ###if (!silent && !is.null(progress_connection) && !is.null(work_piece[['progress_amount']])) {
+             ###  foobar <- writeBin(work_piece[['progress_amount']], progress_connection)
+             ###}
+             if (!silent && !is.null(work_piece[['progress_amount']])) {
+               cat(paste0(work_piece[['progress_amount']]))
+             }
+             found_file  # value of the inlined .LoadDataFile body: the file read, or NULL
+           }
+
+           # (End of the pasted .LoadDataFile body; the flow of Load() resumes here,
+           # using the 'found_data' returned by the call further above.)
+           found_dims <- found_data$dims
+           var_long_name <- found_data$var_long_name
+           units <- found_data$units
+           if (!is.null(found_dims)) {
+             is_2d_var <- found_data$is_2d_var
+             if (!is_2d_var && (output != 'areave')) {
+               cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n",
+                         sep = ''))
+               output <- 'areave'
+             }
+             if (output != 'areave' && is.null(grid)) {
+               grid <- found_data$grid
+             }
+             if (is.null(nmember)) {
+               if (is.null(found_dims[['member']])) {
+                 cat("! Warning: loading data from a server but 'nmember' not specified. 
Loading only one member.\n") + nmember <- rep(1, nmod) + } else { + nmember <- rep(found_dims[['member']], nmod) + } + } + + if (is.null(nleadtime)) { + nleadtime <- found_dims[['time']] + } + if (is.null(leadtimemax)) { + leadtimemax <- nleadtime + } else if (leadtimemax > nleadtime) { + stop("Error: 'leadtimemax' argument is greater than the number of loaded leadtimes. Put first the experiment with the greatest number of leadtimes or adjust properly the parameters 'nleadtime' and 'leadtimemax'.") + } + + leadtimes <- seq(leadtimemin, leadtimemax, sampleperiod) + latitudes <- found_dims[['lat']] + longitudes <- found_dims[['lon']] + + if (output == 'lon' || output == 'lonlat') { + dim_exp[['lon']] <- length(longitudes) + } + if (output == 'lat' || output == 'lonlat') { + dim_exp[['lat']] <- length(latitudes) + } + dim_exp[['time']] <- length(leadtimes) + dim_exp[['member']] <- max(nmember) + dim_exp[['sdate']] <- nsdates + dim_exp[['dataset']] <- nmod + dims2define <- FALSE + } + } + + # We keep on iterating through members to build all the work pieces. + if (is_file_per_member_exp[jmod]) { + jmember <- 1 + while (jmember <= nmember[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", (nmember[jmod] %/% 10) + 1, "i", sep = ''), jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][['path']], replace_values), + namevar = namevar, indices = c(1, jmember, jsdate, jmod), + nmember = nmember[jmod], leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][['dimnames']], + var_limits = c(mod_var_min, mod_var_max), remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + jmember <- jmember + 1 + } + } else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][['path']], replace_values), + namevar = namevar, indices = c(1, 1, jsdate, jmod), + nmember = nmember[jmod], leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][['dimnames']], + var_limits = c(mod_var_min, mod_var_max), remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + } + jsdate <- jsdate + 1 + } + jmod <- jmod + 1 + } + if (dims2define && length(exp) > 0) { + cat("! Warning: no data found in file system for any experimental dataset.\n") + } + + dims <- dim_exp[na.omit(match(c('dataset', 'member', 'sdate', 'time', 'lat', 'lon'), names(dim_exp)))] + if (is.null(dims[['member']]) || any(is.na(unlist(dims))) || any(unlist(dims) == 0)) { + dims <- 0 + dim_exp <- NULL + } + if (!silent) { + message <- "* Success. Detected dimensions of experimental data: " + cat(paste0(message, paste(unlist(dims), collapse = ', '), '\n')) + cat("* Fetching first observational files to work out 'var_obs' size...\n") + } + + + # If there are no experiments to load we need to choose a number of time steps + # to load from observational datasets. We load from the first start date to + # the current date. + if (is.null(exp) || dims == 0) { + if (is.null(leadtimemax)) { + cat("! Warning: loading observations only and no 'leadtimemax' specified. 
Data will be loaded from each starting date to current time.\n")
+       diff <- Sys.time() - as.POSIXct(paste(substr(sdates[1], 1, 4), '-',
+               substr(sdates[1], 5, 6), '-', substr(sdates[1], 7, 8), sep=''))
+       if (storefreq == 'monthly') {
+         leadtimemax <- as.integer(diff/30)
+       } else {
+         leadtimemax <- as.integer(diff)
+       }
+     }
+     if (is.null(nleadtime)) {
+       nleadtime <- leadtimemax
+     }
+     leadtimes <- seq(leadtimemin, leadtimemax, sampleperiod)
+   }
+
+   # Now we start iterating over observations. We try to find the output matrix
+   # dimensions and we build anyway the work pieces corresponding to the observational
+   # data that corresponds in time to the experimental data, or to the time steps until
+   # the current date if no experimental datasets were specified.
+   dataset_type <- 'obs'
+   dim_obs <- NULL
+   dims2define <- TRUE
+   lat_indices <- lon_indices <- NULL
+   obs_work_pieces <- list()
+   is_file_per_dataset_obs <- rep(FALSE, nobs)
+   is_file_per_member_obs <- rep(FALSE, nobs)
+   jobs <- 1
+   while (jobs <= nobs) {
+     tags_to_find <- c('MONTH', 'DAY', 'YEAR', 'MEMBER_NUMBER')
+     position_of_tags <- na.omit(match(tags_to_find, names(replace_values)))
+     if (length(position_of_tags) > 0) {
+       quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][['path']],
+                                                           replace_values[-position_of_tags], TRUE)
+     } else {
+       quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][['path']],
+                                                           replace_values, TRUE)
+     }
+     is_file_per_dataset_obs[jobs] <- !any(sapply(c("$MONTH$", "$DAY$", "$YEAR$"),
+                                                  grepl, quasi_final_path, fixed = TRUE))
+     is_file_per_member_obs[jobs] <- grepl("$MEMBER_NUMBER$", quasi_final_path, fixed = TRUE)
+     replace_values[["OBS_NAME"]] <- obs[[jobs]][['name']]
+     replace_values[["NC_VAR_NAME"]] <- obs[[jobs]][['nc_var_name']]
+     namevar <- .ConfigReplaceVariablesInString(obs[[jobs]][['nc_var_name']], replace_values)
+     replace_values[["SUFFIX"]] <- obs[[jobs]][['suffix']]
+     if (is.null(varmin)) {
+       obs_var_min <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][['var_min']], replace_values))
+     } else {
+       obs_var_min <- varmin
+     }
+     if (is.null(varmax)) {
+       obs_var_max <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][['var_max']], replace_values))
+     } else {
+       obs_var_max <- varmax
+     }
+     # This file format (file per whole dataset) is only supported in observations.
+     # However a file per whole dataset experiment could be seen as a file per
+     # member/ensemble experiment with a single start date, so still loadable.
+     # Nonetheless file per whole dataset observational files do not need to contain
+     # a year and month in the filename; the time correspondence relies on the
+     # months and years associated with each timestep inside the NetCDF file.
+     # So file per whole dataset experiments need to have a start date in the filename.
+     if (is_file_per_dataset_obs[jobs]) {
+       ## TODO: Open file-per-dataset-files only once. 
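+       # (Example of the detection above, with made-up patterns: a path like
+       # '/obs/$OBS_NAME$/$VAR_NAME$_all.nc' contains no $YEAR$/$MONTH$/$DAY$ tag,
+       # so the dataset is flagged as file-per-dataset, while
+       # '/obs/$OBS_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' is opened month by month.)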
+ if (dims2define) { + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values), + namevar = namevar, grid = grid, remap = remap, remapcells = remapcells, + is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), dimnames = obs[[jobs]][['dimnames']], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != 'areave')) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = '')) + output <- 'areave' + } + if (output != 'areave' && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[['member']])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } else { + nmemberobs <- rep(found_dims[['member']], nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[['lon']] + latitudes <- found_dims[['lat']] + } + + if (output == 'lon' || output == 'lonlat') { + dim_obs[['lon']] <- length(longitudes) + } + if (output == 'lat' || output == 'lonlat') { + dim_obs[['lat']] <- length(latitudes) + } + dim_obs[['time']] <- length(leadtimes) + dim_obs[['member']] <- max(nmemberobs) + dim_obs[['sdate']] <- nsdates + dim_obs[['dataset']] <- nobs + dims2define <- FALSE + } + } + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values), + namevar = namevar, indices = c(1, 1, 1, jobs), + nmember = nmemberobs[jobs], + mask = maskobs[[jobs]], leadtimes = leadtimes, + is_file_per_dataset = is_file_per_dataset_obs[jobs], + startdates = sdates, dimnames = obs[[jobs]][['dimnames']], + var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } else { + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + sdate <- sdates[jsdate] + + if (storefreq == 'daily') { + day <- substr(sdate, 7, 8) + if (day == '') { + day <- '01' + } + day <- as.integer(day) + startdate <- as.POSIXct(paste(substr(sdate, 1, 4), '-', + substr(sdate, 5, 6), '-', day, ' 12:00:00', sep = '')) + + (leadtimemin - 1) * 86400 + year <- as.integer(substr(startdate, 1, 4)) + month <- as.integer(substr(startdate, 6, 7)) + } else { + month <- (as.integer(substr(sdate, 5, 6)) + leadtimemin - 2) %% 12 + 1 + year <- as.integer(substr(sdate, 1, 4)) + (as.integer(substr(sdate, + 5, 6)) + leadtimemin - 2) %/% 12 + } + jleadtime <- 1 + while (jleadtime <= length(leadtimes)) { + replace_values[["YEAR"]] <- paste(year, '', sep = '') + replace_values[["MONTH"]] <- sprintf("%2.2i", month) + if (storefreq == 'daily') { + replace_values[["DAY"]] <- sprintf("%2.2i", day) + days_in_month <- ifelse(LeapYear(year), 29, 28) + days_in_month <- switch(paste(month, '', sep = ''), '1' = 31, + '3' = 31, '4' = 30, '5' = 31, '6' = 30, + '7' = 31, '8' = 31, '9' = 30, '10' = 31, + '11' = 30, '12' = 31, days_in_month) + ## This condition must be fulfilled to put all the month time steps + ## in the dimension of length nleadtimes. 
Otherwise it must be cut:
+             #(length(leadtimes) - 1)*sampleperiod + 1 - (jleadtime - 1)*sampleperiod >= days_in_month - day + 1
+             obs_file_indices <- seq(day, min(days_in_month, (length(leadtimes) - jleadtime) * sampleperiod + day), sampleperiod)
+           } else {
+             obs_file_indices <- 1
+           }
+           if (dims2define) {
+             if (is_file_per_member_obs[jobs]) {
+               replace_values[["MEMBER_NUMBER"]] <- '*'
+             }
+             work_piece <- list(dataset_type = dataset_type,
+                                filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values),
+                                namevar = namevar, grid = grid, remap = remap, remapcells = remapcells,
+                                is_file_per_member = is_file_per_member_obs[jobs],
+                                is_file_per_dataset = is_file_per_dataset_obs[jobs],
+                                lon_limits = c(lonmin, lonmax),
+                                lat_limits = c(latmin, latmax),
+                                dimnames = obs[[jobs]][['dimnames']], single_dataset = single_dataset)
+             found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, silent = silent)
+             found_dims <- found_data$dims
+             var_long_name <- found_data$var_long_name
+             units <- found_data$units
+             if (!is.null(found_dims)) {
+               is_2d_var <- found_data$is_2d_var
+               if (!is_2d_var && (output != 'areave')) {
+                 cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n",
+                           sep = ''))
+                 output <- 'areave'
+               }
+               if (output != 'areave' && is.null(grid)) {
+                 grid <- found_data$grid
+               }
+               if (is.null(nmemberobs)) {
+                 if (is.null(found_dims[['member']])) {
+                   cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. Loading only one member.\n")
+                   nmemberobs <- rep(1, nobs)
+                 } else {
+                   nmemberobs <- rep(found_dims[['member']], nobs)
+                 }
+               }
+               if (is.null(dim_exp)) {
+                 longitudes <- found_dims[['lon']]
+                 latitudes <- found_dims[['lat']]
+               }
+
+               if (output == 'lon' || output == 'lonlat') {
+                 dim_obs[['lon']] <- length(longitudes)
+               }
+               if (output == 'lat' || output == 'lonlat') {
+                 dim_obs[['lat']] <- length(latitudes)
+               }
+               dim_obs[['time']] <- length(leadtimes)
+               dim_obs[['member']] <- max(nmemberobs)
+               dim_obs[['sdate']] <- nsdates
+               dim_obs[['dataset']] <- nobs
+               dims2define <- FALSE
+             }
+           }
+           if (is_file_per_member_obs[jobs]) {
+             jmember <- 1
+             while (jmember <= nmemberobs[jobs]) {
+               replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", (nmemberobs[jobs] %/% 10) + 1, "i", sep = ''), jmember - 1)
+               work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values),
+                                  namevar = namevar, indices = c(jleadtime, jmember, jsdate, jobs),
+                                  nmember = nmemberobs[jobs], leadtimes = obs_file_indices,
+                                  mask = maskobs[[jobs]], dimnames = obs[[jobs]][['dimnames']],
+                                  is_file_per_dataset = is_file_per_dataset_obs[jobs],
+                                  var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells)
+               obs_work_pieces <- c(obs_work_pieces, list(work_piece))
+               jmember <- jmember + 1
+             }
+           } else {
+             work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values),
+                                namevar = namevar, indices = c(jleadtime, 1, jsdate, jobs),
+                                nmember = nmemberobs[jobs], leadtimes = obs_file_indices,
+                                mask = maskobs[[jobs]], dimnames = obs[[jobs]][['dimnames']],
+                                is_file_per_dataset = is_file_per_dataset_obs[jobs],
+                                var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells)
+             obs_work_pieces <- c(obs_work_pieces, list(work_piece))
+           }
+
+           if (storefreq == 'daily') {
+             startdate <- startdate + 86400 * sampleperiod * length(obs_file_indices)
+             year <- as.integer(substr(startdate, 1, 4))
+             month <- as.integer(substr(startdate, 6, 7))
+             day <- as.integer(substr(startdate, 9, 10))
+           } else {
+             month <- month + sampleperiod
+             year <- year + (month - 1) %/% 12
+             month <- (month - 1) %% 12 + 1
+           }
+           jleadtime <- jleadtime + length(obs_file_indices)
+         }
+
+         jsdate <- jsdate + 1
+       }
+     }
+     jobs <- jobs + 1
+   }
+   if (dims2define && length(obs) > 0) {
+     cat("! Warning: no data found in file system for any observational dataset.\n")
+   }
+   dims <- dim_obs[na.omit(match(c('dataset', 'member', 'sdate', 'time', 'lat', 'lon'), names(dim_obs)))]
+   if (is.null(dims[['member']]) || any(is.na(unlist(dims))) || any(unlist(dims) == 0)) {
+     dims <- 0
+     dim_obs <- NULL
+   }
+   if (!silent) {
+     message <- "* Success. Detected dimensions of observational data: "
+     cat(paste0(message, paste(unlist(dims), collapse = ', '), '\n'))
+   }
+
+   if (!(is.null(dim_obs) && is.null(dim_exp))) {
+
+     # We build two matrices in shared memory for the parallel processes to
+     # store their results.
+     # These matrices will contain data arranged with the following
+     # dimension order, to maintain data spatial locality during the
+     # parallel fetch:
+     #   longitudes, latitudes, leadtimes, members, startdates, nmod/nobs
+     # So [1, 1, 1, 1, 1, 1] will be next to [2, 1, 1, 1, 1, 1] in memory
+     pointer_var_exp <- pointer_var_obs <- NULL
+     if (!is.null(dim_exp) && (length(unlist(dim_exp)) == length(dim_exp)) &&
+         !any(is.na(unlist(dim_exp))) && !any(unlist(dim_exp) == 0)) {
+       var_exp <- big.matrix(nrow = prod(unlist(dim_exp)), ncol = 1)
+       pointer_var_exp <- describe(var_exp)
+     }
+     if (!is.null(dim_obs) && (length(unlist(dim_obs)) == length(dim_obs)) &&
+         !any(is.na(unlist(dim_obs))) && !any(unlist(dim_obs) == 0)) {
+       var_obs <- big.matrix(nrow = prod(unlist(dim_obs)), ncol = 1)
+       pointer_var_obs <- describe(var_obs)
+     }
+     if (is.null(nprocs)) {
+       nprocs <- detectCores()
+     }
+     # We calculate the % of total progress that each work piece represents so
+     # that the progress bar can be updated properly
+     exp_work_piece_percent <- prod(dim_exp) / (prod(dim_obs) + prod(dim_exp))
+     obs_work_piece_percent <- prod(dim_obs) / (prod(dim_obs) + prod(dim_exp))
+     # Add some important extra fields to the work pieces before sending
+     exp_work_pieces <- lapply(exp_work_pieces, function (x) c(x, list(dataset_type = 'exp', dims = dim_exp, out_pointer = pointer_var_exp)))###, progress_amount = exp_work_piece_progress)))
+     obs_work_pieces <- lapply(obs_work_pieces, function (x) c(x, list(dataset_type = 'obs', dims = dim_obs, out_pointer = pointer_var_obs)))###, progress_amount = obs_work_piece_progress)))
+     work_pieces <- c(exp_work_pieces, obs_work_pieces)
+     # Calculate the progress %s that will be displayed and assign them to the
+     # appropriate work pieces
+     if (length(work_pieces)/nprocs >= 2 && !silent) {
+       if (length(work_pieces)/nprocs < 10) {
+         amount <- 100/ceiling(length(work_pieces)/nprocs)
+         reps <- ceiling(length(work_pieces)/nprocs)
+       } else {
+         amount <- 10
+         reps <- 10
+       }
+       progress_steps <- rep(amount, reps)
+       if (length(exp_work_pieces) == 0) {
+         selected_exp_pieces <- c()
+       } else if (length(exp_work_pieces) < floor(reps*exp_work_piece_percent) + 1) {
+         selected_exp_pieces <- length(exp_work_pieces)
+         progress_steps <- c(sum(head(progress_steps,
+                                      floor(reps*exp_work_piece_percent))),
+                             tail(progress_steps,
+                                  ceiling(reps*obs_work_piece_percent)))
+       } else {
+         selected_exp_pieces <- round(seq(1, length(exp_work_pieces),
+                                          length.out = floor(reps*exp_work_piece_percent) + 1))[-1]
+       }
+       if (length(obs_work_pieces) == 0) {
+         selected_obs_pieces <- c()
+       } else if (length(obs_work_pieces) < 
ceiling(reps*obs_work_piece_percent) + 1) { + selected_obs_pieces <- length(obs_work_pieces) + progress_steps <- c(head(progress_steps, + floor(reps*exp_work_piece_percent)), + sum(tail(progress_steps, + ceiling(reps*obs_work_piece_percent)))) + } else { + selected_obs_pieces <- round(seq(1, length(obs_work_pieces), + length.out = ceiling(reps*obs_work_piece_percent) + 1))[-1] + } + selected_pieces <- c(selected_exp_pieces, selected_obs_pieces + length(exp_work_pieces)) + progress_steps <- paste0(' + ', round(progress_steps, 2), '%') + progress_message <- '* Progress: 0%' + } else { + progress_message <- '' + selected_pieces <- NULL + } + piece_counter <- 1 + step_counter <- 1 + work_pieces <- lapply(work_pieces, + function (x) { + wp <- c(x, list(is_2d_var = is_2d_var, grid = grid, remap = remap, + lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), + output = output, remapcells = remapcells, + single_dataset = single_dataset)) + if (piece_counter %in% selected_pieces) { + wp <- c(wp, list(progress_amount = progress_steps[step_counter])) + step_counter <<- step_counter + 1 + } + piece_counter <<- piece_counter + 1 + wp + }) + if (!silent) { + cat(paste("* Will now proceed to read and process ", length(work_pieces), " data files:\n", sep = '')) + if (length(work_pieces) < 30) { + lapply(work_pieces, function (x) cat(paste("* ", x[['filename']], '\n', sep = ''))) + } else { + cat(paste("* The list of files is long. You can check it after Load() finishes in the output '$source_files'.\n")) + } + if (length(dim_obs) == 0) { + bytes_obs <- 0 + obs_dim_sizes <- '0' + } else { + bytes_obs <- prod(c(dim_obs, 8)) + obs_dim_sizes <- paste(na.omit(as.vector(dim_obs[c('dataset', 'member', 'sdate', 'time', 'lat', 'lon')])), collapse = ' x ') + } + if (length(dim_exp) == 0) { + bytes_exp <- 0 + exp_dim_sizes <- '0' + } else { + bytes_exp <- prod(c(dim_exp, 8)) + exp_dim_sizes <- paste(na.omit(as.vector(dim_exp[c('dataset', 'member', 'sdate', 'time', 'lat', 'lon')])), collapse = ' x ') + } + cat(paste("* Total size of requested data: ", bytes_obs + bytes_exp, "bytes.\n")) + cat(paste("* - Experimental data: (", exp_dim_sizes, ") x 8 bytes =", bytes_exp, "bytes.\n")) + cat(paste("* - Observational data: (", obs_dim_sizes, ") x 8 bytes =", bytes_obs, "bytes.\n")) + cat(paste("* If size of requested data is close to or above the free shared RAM memory, R will crash.\n")) + } + # Build the cluster of processes that will do the work and dispatch work pieces. + # The function .LoadDataFile is applied to each work package. This function will + # open the data file, regrid if needed, trim (select time steps, longitudes, + # latitudes, members), apply the mask, compute and apply the weights if needed, + # disable extreme values and store in the shared memory matrices. 
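+ # Illustrative sketch (not part of s2dverification, kept commented out so it
+ # never executes here) of the dispatch pattern used just below: a shared
+ # big.matrix is created on the master, its descriptor travels inside each
+ # work piece, and every worker attaches the matrix and writes its slice.
+ # All names ('shared', 'pieces', the squaring) are placeholders.
+ # library(bigmemory); library(parallel)
+ # shared <- big.matrix(nrow = 10, ncol = 1)
+ # desc <- describe(shared)
+ # pieces <- lapply(1:10, function(i) list(index = i, out_pointer = desc))
+ # cl <- makeCluster(2)
+ # invisible(clusterApplyLB(cl, pieces, function(piece) {
+ #   library(bigmemory)
+ #   m <- attach.big.matrix(piece[['out_pointer']])
+ #   m[piece[['index']], 1] <- piece[['index']]^2  # stands in for loaded data
+ #   NULL
+ # }))
+ # stopCluster(cl)
+ # all(shared[, 1] == (1:10)^2)  # TRUE: the workers wrote into shared memory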
+ if (nprocs == 1) { + found_files <- lapply(work_pieces, .LoadDataFile, silent = silent) + } else { + cluster <- makeCluster(nprocs, outfile = "") + # Open connections to keep track of progress + ###range_progress_ports <- c(49000, 49999) + ###progress_ports <- as.list(sample(range_progress_ports[2] - range_progress_ports[1], nprocs) + range_progress_ports[1]) + + # Open from master side + ###connection_set_up_job <- mcparallel({ + ### progress_connections <- vector('list', length(progress_ports)) + ### for (connection in 1:length(progress_ports)) { + ### attempts <- 0 + ### max_attempts <- 3 + ### while (is.null(progress_connections[[connection]]) && attempts < max_attempts) { + ### Sys.sleep(2) + ### suppressWarnings({ + ### progress_connections[[connection]] <- try({ + ### socketConnection(port = progress_ports[[connection]], open = 'w+b') + ### }, silent = TRUE) + ### }) + ### if (!('sockconn' %in% class(progress_connections[[connection]]))) { + ### progress_connections[[connection]] <- NULL + ### } + ### attempts <- attempts + 1 + ### } + ### } + + # And start polling the sockets and update progress bar + ### if (!any( lapply is.null!!! is.null(progress_connections))) { + ### progress <- 0.0 + ### pb <- txtProgressBar(0, 1, style = 3) + ### stop_polling <- FALSE + ### attempts <- 0 + ### max_attempts <- 3 + ### while (progress < 0.999 && !stop_polling) { + ### Sys.sleep(3) + ### progress_obtained <- lapply(progress_connections, function(x) as.numeric(readBin(x, 'double'))) + ### total_progress_obtained <- sum(unlist(progress_obtained)) + ### if (total_progress_obtained > 0) { + ### progress <- progress + total_progress_obtained + ### setTxtProgressBar(pb, progress) + ### attempts <- 0 + ### } else { + ### attempts <- attempts + 1 + ### if (attempts >= max_attempts) { + ### stop_polling <- TRUE + ### } + ### } + ### } + ### } + ###}) + + # Open from the workers side + ###open_connections <- clusterApply(cluster, progress_ports, + ### function (x) { + ### progress_connection <<- NULL + ### progress_connection <<- try({ + ### socketConnection(server = TRUE, port = x, open = 'w+b') + ### }) + ### if ('sockconn' %in% class(progress_connection)) { + ### TRUE + ### } else { + ### progress_connection <<- NULL + ### FALSE + ### } + ### }) + + ###if (!all(unlist(open_connections))) { + ### if (!silent) { + ### cat(paste("! Warning: failed to open connections in ports", process_track_ports[1], "to", process_track_ports[2], "to keep track of progress. Progress bar will not be displayed\n")) + ### } + ###} + + if (!silent) { + cat(paste("* Loading... This may take several minutes...\n", sep = '')) + cat(progress_message) + } + # Send the heavy work to the workers + work_errors <- try({ + found_files <- clusterApplyLB(cluster, work_pieces, .LoadDataFile, silent = silent) + }) + stopCluster(cluster) + } + if (!silent) { + if (progress_message != '') { + cat("\n") + } + if (any(unlist(lapply(found_files, is.null)))) { + if (sum(unlist(lapply(found_files, is.null))) < 30) { + cat("! WARNING: The following files were not found in the file system. Filling with NA values instead.\n") + lapply(work_pieces[which(unlist(lapply(found_files, is.null)))], function (x) cat(paste("* ", x[['filename']], '\n', sep = ''))) + } else { + cat("! WARNING: Some files were not found in the file system. The list is long. You can check it in the output '$not_found_files'. 
Filling with NA values instead.\n") + } + } + } + source_files <- unlist(found_files[which(!unlist(lapply(found_files, is.null)))]) + not_found_files <- unlist(lapply(work_pieces[which(unlist(lapply(found_files, is.null)))], '[[', 'filename')) + + } else { + error_message <- "Error: No found files for any dataset. Check carefully the file patterns and correct either the pattern or the provided parameters:\n" + if (!is.null(exp)) { + lapply(exp, function (x) error_message <<- paste0(error_message, paste0(x[['path']], '\n'))) + } + if (!is.null(obs)) { + lapply(obs, function (x) error_message <<- paste0(error_message, paste0(x[['path']], '\n'))) + } + stop(error_message) + } + + }) + + if (class(errors) == 'try-error') { + invisible(list(load_parameters = load_parameters)) + } else { + variable <- list() + variable[['varName']] <- var + variable[['level']] <- NULL + attr(variable, 'is_standard') <- FALSE + attr(variable, 'units') <- units + attr(variable, 'longname') <- var_long_name + attr(variable, 'daily_agg_cellfun') <- 'none' + attr(variable, 'monthly_agg_cellfun') <- 'none' + attr(variable, 'verification_time') <- 'none' + + if (is.null(var_exp)) { + mod_data <- NULL + } else { + dim_reorder <- length(dim_exp):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_exp + dim_exp <- dim_exp[dim_reorder] + mod_data <- aperm(array(bigmemory::as.matrix(var_exp), dim = old_dims), dim_reorder) + attr(mod_data, 'dimensions') <- names(dim_exp) + } + + if (is.null(var_obs)) { + obs_data <- NULL + } else { + dim_reorder <- length(dim_obs):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_obs + dim_obs <- dim_obs[dim_reorder] + obs_data <- aperm(array(bigmemory::as.matrix(var_obs), dim = old_dims), dim_reorder) + attr(obs_data, 'dimensions') <- names(dim_obs) + } + + if (is.null(latitudes)) { + lat <- 0 + attr(lat, 'cdo_grid_name') <- 'none' + } else { + lat <- latitudes + attr(lat, 'cdo_grid_name') <- if (is.null(grid)) 'none' else grid + } + attr(lat, 'projection') <- 'none' + + if (is.null(longitudes)) { + lon <- 0 + attr(lon, 'cdo_grid_name') <- 'none' + } else { + lon <- longitudes + attr(lon, 'cdo_grid_name') <- if (is.null(grid)) 'none' else grid + } + attr(lon, 'projection') <- 'none' + + dates <- list() + dates[['start']] <- NULL + dates[['end']] <- NULL + + models <- NULL + if (length(exp) > 0 && !is.null(dim_exp)) { + models <- list() + for (jmod in 1:length(exp)) { + models[[exp[[jmod]][['name']]]] <- list( + members = paste0('Member_', 1:nmember[jmod]), + source = if ((nchar(exp[[jmod]][['path']]) - + nchar(gsub("/", "", exp[[jmod]][['path']])) > 2) && + (length(sdates) > 1 && !is_file_per_member_exp[jmod])) { + parts <- strsplit(exp[[jmod]][['path']], '/')[[1]] + paste(parts[-length(parts)], sep = '', collapse = '/') + } else { + exp[[jmod]][['path']] + }) + } + } + + observations <- NULL + if (length(obs) > 0 && !is.null(dim_obs)) { + observations <- list() + for (jobs in 1:length(obs)) { + observations[[obs[[jobs]][['name']]]] <- list( + members = paste0('Member_', 1:nmemberobs[jobs]), + source = if ((nchar(obs[[jobs]][['path']]) - + nchar(gsub("/", "", obs[[jobs]][['path']])) > 2) && + !is_file_per_dataset_obs[jobs]) { + parts <- strsplit(obs[[jobs]][['path']], '/')[[1]] + paste(parts[-length(parts)], sep = '', collapse = '/') + } else { + obs[[jobs]][['path']] + }) + } + } + + # Before ending, the data is arranged in the common format, with the following + # dimension order: + # nmod/nobs, members, startdates, leadtimes, latitudes, longitudes + invisible(list(mod = 
mod_data, + obs = obs_data, + lon = lon, + lat = lat, + Variable = variable, + Datasets = list(exp = models, obs = observations), + Dates = dates, + InitializationDates = lapply(sdates, + function (x) { + sink('/dev/null') + date <- print(as.POSIXct(as.Date(x, format = '%Y%m%d'))) + sink() + date + }), + when = Sys.time(), + source_files = source_files, + not_found_files = not_found_files, + load_parameters = load_parameters)) + } + + +write.table(subset_indices, file=paste0(workdir,'/subset_indices.txt')) + +} diff --git a/old/SkillScores_MN_v1.R~ b/old/SkillScores_MN_v1.R~ new file mode 100644 index 0000000000000000000000000000000000000000..4e555769e57e26f24b519f77b68e1750ccdc106e --- /dev/null +++ b/old/SkillScores_MN_v1.R~ @@ -0,0 +1,2147 @@ +######################################################################################### +# Sub-seasonal Skill Scores # +######################################################################################### + +library(s2dverification) +library(ncdf4) +library(plyr) + +#source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R') +#source('/gpfs/projects/bsc32/bsc32842/scripts/Utils.R') +source('/home/Earth/ncortesi/Downloads/scripts/Utils.R') + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +#workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/MN" + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal +chunk <- as.integer(args[1]) # number of the chunk to run in this script + +forecast.year <- 2014 # starting year of the weekly sequence of the forecasts + +# generic path of the forecast system files: +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly <- list(path = '/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/sfcWind_f6h/2014010200/sfcWind_2013010200.nc') +#ECMWF_monthly <- list(path = paste0(workdir,'/2014010200/sfcWind_2013010200.nc')) + +#system("module load GCC") +#system("module load HDF5") +#system("module load CDO") + +# load once 1 file to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed: +#time <- system.time({ +#var <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates='20130102', leadtimemin = 1, leadtimemax=4, output = 'lonlat', nprocs=1) +#}) +#save(time, file=paste0(workdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData')) +#write.table(var$lon, var$lat, file=paste0(workdir,'/load_coord.txt')) + +var='sfcWind' +exp=list(ECMWF_monthly) +obs=NULL +sdates='20130102' +nmember=NULL +nmemberobs=NULL +nleadtime=NULL +leadtimemin=1 +leadtimemax=4 +storefreq='monthly' +sampleperiod=1 +lonmin=0 +lonmax=360 +latmin=-90 +latmax=90 +output='lonlat' +method='conservative' +grid=NULL +maskmod=vector('list',15) +maskobs=vector('list',15) +configfile=NULL +varmin=NULL +varmax=NULL +silent=FALSE +nprocs=1 +dimnames=NULL +remapcells=2 + +test='test' + + # Print a stamp of the call the user issued. 
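+ # Illustrative sketch (not original code) of the idiom used below: at the
+ # top of a function body, ls() lists the parameter names and get() resolves
+ # each one, giving a named-list snapshot of the call. Function and argument
+ # names here are made up.
+ # snapshot_args <- function(var = 'sfcWind', sdates = '20130102') {
+ #   nms <- ls()  # c('sdates', 'var'), alphabetical
+ #   setNames(lapply(nms, get, envir = environment()), nms)
+ # }
+ # str(snapshot_args())  # $sdates: chr "20130102", $var: chr "sfcWind"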
+ parameter_names <- ls() + if (length(parameter_names) < 3 || is.null(var) || + is.null(sdates) || (is.null(exp) && is.null(obs))) { + stop("Error: At least 'var', 'exp'/'obs' and 'sdates' must be provided.") + } + load_parameters <- lapply(parameter_names, get, envir = environment()) + names(load_parameters) <- parameter_names +#write.table(load_parameters$exp, file=paste0(workdir,'/load_parameters.txt')) + + + parameters_to_show <- c('var', 'exp', 'obs', 'sdates', 'grid', 'output', 'storefreq') + load_parameters <- c(load_parameters[parameters_to_show], load_parameters[-match(parameters_to_show, names(load_parameters))]) + cat(paste("* The load call you issued is:\n* Load(", + paste(strwrap( + paste(unlist(lapply(names(load_parameters[1:length(parameters_to_show)]), + function(x) paste(x, '=', + if (x == 'sdates' && length(load_parameters[[x]]) > 4) { + paste0("c('", load_parameters[[x]][1], "', '", load_parameters[[x]][2], + "', ..., '", tail(load_parameters[[x]], 1), "')") + } else { + paste(deparse(load_parameters[[x]]), collapse = '') + }))), + collapse = ', '), width = getOption('width') - 9, indent = 0, exdent = 8), collapse = '\n*'), + ", ...)\n* See the full call in '$load_parameters' after Load() finishes.\n", sep = '')) + + +cat("eheheh") + +a=TRUE +if(a==FALSE){ + + # Run Load() error-aware, so that it always returns something + errors <- try({ + + # Check and sanitize parameters + # var + if (is.null(var) || !(is.character(var) && nchar(var) > 0)) { + stop("Error: parameter 'var' should be a character string of length >= 1.") + } + + # exp + exps_to_fetch <- c() + exp_info_names <- c('name', 'path', 'nc_var_name', 'suffix', + 'var_min', 'var_max', 'dimnames') + if (!is.null(exp) && !(is.character(exp) && all(nchar(exp) > 0)) && !is.list(exp)) { + stop("Error: parameter 'exp' should be a vector of strings or a list with information of the experimental datasets to load. Check 'exp' in ?Load for details.") + } else if (!is.null(exp)) { + if (!is.list(exp)) { + exp <- lapply(exp, function (x) list(name = x)) + } + for (i in 1:length(exp)) { + if (!is.list(exp[[i]])) { + stop("Error: parameter 'exp' is incorrect. It should be a list of lists.") + } + if (!(all(names(exp[[i]]) %in% exp_info_names))) { + stop("Error: parameter 'exp' is incorrect. There are unrecognized components in the information of some of the experiments. Check 'exp' in ?Load for details.") + } + if (!('name' %in% names(exp[[i]]))) { + exp[[i]][['name']] <- paste0('exp', i) + if (!('path' %in% names(exp[[i]]))) { + stop("Error: parameter 'exp' is incorrect. A 'path' should be provided for each experimental dataset if no 'name' is provided. See 'exp' in ?Load for details.") + } + } else if (!('path' %in% names(exp[[i]]))) { + exps_to_fetch <- c(exps_to_fetch, i) + } + if ('path' %in% names(exp[[i]])) { + if (!('nc_var_name' %in% names(exp[[i]]))) { + exp[[i]][['nc_var_name']] <- '$VAR_NAME$' + } + if (!('suffix' %in% names(exp[[i]]))) { + exp[[i]][['suffix']] <- '' + } + if (!('var_min' %in% names(exp[[i]]))) { + exp[[i]][['var_min']] <- '' + } + if (!('var_max' %in% names(exp[[i]]))) { + exp[[i]][['var_max']] <- '' + } + } + } + if ((length(exps_to_fetch) > 0) && (length(exps_to_fetch) < length(exp))) { + cat("! Warning: 'path' was provided for some experimental datasets in 'exp'. 
Any \n* information in the configuration file related to these will be ignored.\n") + } + } + + # obs + obs_to_fetch <- c() + obs_info_names <- c('name', 'path', 'nc_var_name', 'suffix', + 'var_min', 'var_max') + if (!is.null(obs) && !(is.character(obs) && all(nchar(obs) > 0)) && !is.list(obs)) { + stop("Error: parameter 'obs' should be a vector of strings or a list with information of the observational datasets to load. Check 'obs' in ?Load for details.") + } else if (!is.null(obs)) { + if (!is.list(obs)) { + obs <- lapply(obs, function (x) list(name = x)) + } + for (i in 1:length(obs)) { + if (!is.list(obs[[i]])) { + stop("Error: parameter 'obs' is incorrect. It should be a list of lists.") + } + if (!(all(names(obs[[i]]) %in% obs_info_names))) { + stop("Error: parameter 'obs' is incorrect. There are unrecognized components in the information of some of the observations. Check 'obs' in ?Load for details.") + } + if (!('name' %in% names(obs[[i]]))) { + obs[[i]][['name']] <- paste0('obs', i) + if (!('path' %in% names(obs[[i]]))) { + stop("Error: parameter 'obs' is incorrect. A 'path' should be provided for each observational dataset if no 'name' is provided. See 'obs' in ?Load for details.") + } + } else if (!('path' %in% names(obs[[i]]))) { + obs_to_fetch <- c(obs_to_fetch, i) + } + if ('path' %in% names(obs[[i]])) { + if (!('nc_var_name' %in% names(obs[[i]]))) { + obs[[i]][['nc_var_name']] <- '$VAR_NAME$' + } + if (!('suffix' %in% names(obs[[i]]))) { + obs[[i]][['suffix']] <- '' + } + if (!('var_min' %in% names(obs[[i]]))) { + obs[[i]][['var_min']] <- '' + } + if (!('var_max' %in% names(obs[[i]]))) { + obs[[i]][['var_max']] <- '' + } + } + } + if (length(c(obs_to_fetch, exps_to_fetch) > 1) && (length(obs_to_fetch) < length(obs))) { + cat("! Warning: 'path' was provided for some observational datasets in 'obs'. Any \n* information in the configuration file related to these will be ignored.\n") + } + } + + # sdates + if (is.null(sdates)) { + stop("Error: parameter 'sdates' must be provided.") + } + if (!is.character(sdates) || !all(nchar(sdates) == 8) || any(is.na(strtoi(sdates)))) { + stop("Error: parameter 'sdates' is incorrect. All starting dates should be a character string in the format 'YYYYMMDD'.") + } + + # nmember + if (!is.null(nmember) && !is.null(exp)) { + if (!is.numeric(nmember)) { + stop("Error: parameter 'nmember' is incorrect. It should be numeric.") + } + if (length(nmember) == 1) { + cat(paste("! Warning: 'nmember' should specify the number of members of each experimental dataset. Forcing to", nmember, "for all experiments.\n")) + nmember <- rep(nmember, length(exp)) + } + if (length(nmember) != length(exp)) { + stop("Error: 'nmember' must contain as many values as 'exp'.") + } else if (any(is.na(nmember))) { + nmember[which(is.na(nmember))] <- max(nmember, na.rm = TRUE) + } + } + + # nmemberobs + if (!is.null(nmemberobs) && !is.null(obs)) { + if (!is.numeric(nmemberobs)) { + stop("Error: parameter 'nmemberobs' is incorrect. It should be numeric.") + } + if (length(nmemberobs) == 1) { + cat(paste("! Warning: 'nmemberobs' should specify the number of members of each observational dataset. 
Forcing to", nmemberobs, "for all observations.\n")) + nmemberobs <- rep(nmemberobs, length(obs)) + } + if (length(nmemberobs) != length(obs)) { + stop("Error: 'nmemberobs' must contain as many values as 'obs'.") + } else if (any(is.na(nmemberobs))) { + nmemberobs[which(is.na(nmemberobs))] <- max(nmemberobs, na.rm = TRUE) + } + } + + # nleadtime + if (!is.null(nleadtime) && !is.numeric(nleadtime)) { + stop("Error: parameter 'nleadtime' is wrong. It should be numeric.") + } + + # leadtimemin + if (is.null(leadtimemin) || !is.numeric(leadtimemin)) { + stop("Error: parameter 'leadtimemin' is wrong. It should be numeric.") + } + + # leadtimemax + if (!is.null(leadtimemax) && !is.numeric(leadtimemax)) { + stop("Error: parameter 'leadtimemax' is wrong. It should be numeric.") + } + + # storefreq + if (!is.character(storefreq) || !(storefreq %in% c('monthly', 'daily'))) { + stop("Error: parameter 'storefreq' is wrong, can take value 'daily' or 'monthly'.") + } + + # sampleperiod + if (is.null(sampleperiod) || !is.numeric(sampleperiod)) { + stop("Error: parameter 'sampleperiod' is wrong. It should be numeric.") + } + + # lonmin + if (is.null(lonmin) || !is.numeric(lonmin)) { + stop("Error: parameter 'lonmin' is wrong. It should be numeric.") + } + if (lonmin < -360 || lonmin > 360) { + stop("Error: parameter 'lonmin' must be in the range [-360, 360]") + } + if (lonmin < 0) { + lonmin <- lonmin + 360 + } + + # lonmax + if (is.null(lonmax) || !is.numeric(lonmax)) { + stop("Error: parameter 'lonmax' is wrong. It should be numeric.") + } + if (lonmax < -360 || lonmax > 360) { + stop("Error: parameter 'lonmax' must be in the range [-360, 360]") + } + if (lonmax < 0) { + lonmax <- lonmax + 360 + } + + # latmin + if (is.null(latmin) || !is.numeric(latmin)) { + stop("Error: parameter 'latmin' is wrong. It should be numeric.") + } + if (latmin > 90 || latmin < -90) { + stop("Error: 'latmin' must be in the interval [-90, 90].") + } + + # latmax + if (is.null(latmax) || !is.numeric(latmax)) { + stop("Error: parameter 'latmax' is wrong. It should be numeric.") + } + if (latmax > 90 || latmax < -90) { + stop("Error: 'latmax' must be in the interval [-90, 90].") + } + + # output + if (is.null(output) || !(output %in% c('lonlat', 'lon', 'lat', 'areave'))) { + stop("Error: 'output' can only take values 'lonlat', 'lon', 'lat' or 'areave'.") + } + + # method + if (is.null(method) || !(method %in% c('bilinear', 'bicubic', 'conservative', 'distance-weighted'))) { + stop("Error: parameter 'method' is wrong, can take value 'bilinear', 'bicubic', 'conservative' or 'distance-weighted'.") + } + remap <- switch(method, 'bilinear' = 'remapbil', 'bicubic' = 'remapbic', + 'conservative' = 'remapcon', 'distance-weighted' = 'remapdis') + + # grid + if (!is.null(grid)) { + if (is.character(grid)) { + supported_grids <- list('r[0-9]{1,}x[0-9]{1,}', 't[0-9]{1,}grid') + grid_matches <- unlist(lapply(lapply(supported_grids, regexpr, grid), .IsFullMatch, grid)) + if (sum(grid_matches) < 1) { + stop("The specified grid in the parameter 'grid' is incorrect. 
Must be one of rx or tgrid.") + } + } else { + stop("Error: parameter 'grid' should be a character string, if specified.") + } + } + + # maskmod + if (!is.list(maskmod)) { + stop("Error: parameter 'maskmod' must be a list.") + } + if (length(maskmod) < length(exp)) { + stop("Error: 'maskmod' must contain a numeric mask or NULL for each experiment in 'exp'.") + } + for (i in 1:length(maskmod)) { + if (is.list(maskmod[[i]])) { + if ((length(maskmod[[i]]) > 2) || !all(names(maskmod[[i]]) %in% c('path', 'nc_var_name'))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } else if (!(is.numeric(maskmod[[i]]) || is.null(maskmod[[i]]))) { + stop("Error: all masks in 'maskmod' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + + # maskobs + if (!is.list(maskobs)) { + stop("Error: parameter 'maskobs' must be a list.") + } + if (length(maskobs) < length(obs)) { + stop("Error: 'maskobs' must contain a numeric mask or NULL for each obseriment in 'obs'.") + } + for (i in 1:length(maskobs)) { + if (is.list(maskobs[[i]])) { + if ((length(maskobs[[i]]) > 2) || !all(names(maskobs[[i]]) %in% c('path', 'nc_var_name'))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } else if (!(is.numeric(maskobs[[i]]) || is.null(maskobs[[i]]))) { + stop("Error: all masks in 'maskobs' must be a numeric matrix, or a list with the components 'path' and optionally 'nc_var_name', or NULL.") + } + } + + ## Force the observational masks to be the same as the experimental when + ## possible. + if ((output != 'areave' || !is.null(grid)) && length(exp) > 0) { + if (!all(unlist(lapply(maskobs, is.null)))) { + cat("! Warning: 'maskobs' will be ignored. 'maskmod[[1]]' will be applied to observations instead.\n") + } + maskobs <- lapply(maskobs, function(x) x <- maskmod[[1]]) + } + + # configfile + if (is.null(configfile)) { + configfile <- system.file("config", "BSC.conf", package = "s2dverification") + } else if (!is.character(configfile) || !(nchar(configfile) > 0)) { + stop("Error: parameter 'configfile' must be a character string with the path to an s2dverification configuration file, if specified.") + } + + # varmin + if (!is.null(varmin) && !is.numeric(varmin)) { + stop("Error: parameter 'varmin' must be numeric, if specified.") + } + + # varmax + if (!is.null(varmax) && !is.numeric(varmax)) { + stop("Error: parameter 'varmax' must be numeric, if specified.") + } + + # silent + if (!is.logical(silent)) { + stop("Error: parameter 'silent' must be TRUE or FALSE.") + } + + # nprocs + if (!is.null(nprocs) && (!is.numeric(nprocs) || nprocs < 1)) { + stop("Error: parameter 'nprocs' must be a positive integer, if specified.") + } + + # dimnames + if (!is.null(dimnames) && (!is.list(dimnames))) { + stop("Error: parameter 'dimnames' must be a list, if specified.") + } + if (!all(names(dimnames) %in% c('member', 'lat', 'lon'))) { + stop("Error: parameter 'dimnames' is wrong. There are unrecognized component names. See 'dimnames' in ?Load for details.") + } + + # remapcells + if (!is.numeric(remapcells) || remapcells < 0) { + stop("Error: 'remapcells' must be an integer >= 0.") + } + + # If not all data has been provided in 'exp' and 'obs', configuration file is read. 
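+ # Toy illustration (not the package's actual implementation) of the
+ # '$VARIABLE$' substitution that .ConfigReplaceVariablesInString performs on
+ # path patterns via the 'replace_values' dictionary built below; the path
+ # used here is made up:
+ # replace_vars <- function(s, replace_values) {
+ #   for (nm in names(replace_values)) {
+ #     s <- gsub(paste0('$', nm, '$'), replace_values[[nm]], s, fixed = TRUE)
+ #   }
+ #   s
+ # }
+ # replace_vars('/esnas/obs/$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc',
+ #              list(VAR_NAME = 'sfcWind', START_DATE = '20130102'))
+ # # "/esnas/obs/sfcWind/sfcWind_20130102.nc"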
+ if (length(exps_to_fetch) > 0 || length(obs_to_fetch) > 0) { + cat("* Some 'path's not explicitly provided in 'exp' and 'obs', so will now proceed to open the configuration file.\n") + data_info <- ConfigFileOpen(configfile, silent, TRUE) + + # Check that the var, exp and obs parameters are right and keep the entries + # that match for each dataset. + # Afterwards, the matching entries are applied sequentially (as specified + # in ?ConfigFileOpen) and the replace_values are applied to the result. + # Finally a path pattern for each dataset is provided. + matches <- ConfigApplyMatchingEntries(data_info, var, sapply(exp[exps_to_fetch], '[[', 'name'), + sapply(obs[obs_to_fetch], '[[', 'name'), show_entries = FALSE, show_result = FALSE) + # 'replace_values' is a named list that associates a variable name to an + # associated value. Initially it is filled with variables and values parsed + # from the configuration file, but we can add or modify some values during + # the execution to choose for example which start date we want to load. + # When '.ConfigReplaceVariablesInString' is called, all the variable accesses + # ($VARIABLE_NAME$) that appear in the string given as parameter are + # replaced by the associated value in 'replace_values'. + replace_values <- data_info$definitions + if (!is.null(exp) && length(exps_to_fetch) > 0) { + counter <- 1 + exp[exps_to_fetch] <- lapply(matches$exp_info, + function (x) { + x[names(exp[[exps_to_fetch[counter]]])] <- exp[[exps_to_fetch[counter]]] + x[['path']] <- paste0(x[['main_path']], x[['file_path']]) + counter <<- counter + 1 + x + }) + } + if (!is.null(obs) && length(obs_to_fetch) > 0) { + counter <- 1 + obs[obs_to_fetch] <- lapply(matches$obs_info, + function (x) { + x[names(obs[[obs_to_fetch[counter]]])] <- obs[[obs_to_fetch[counter]]] + x[['path']] <- paste0(x[['main_path']], x[['file_path']]) + counter <<- counter + 1 + x + }) + } + if (!silent) { + cat("* All pairs (var, exp) and (var, obs) have matching entries.\n") + } + } else { + replace_values <- list(DEFAULT_NC_VAR_NAME = '$VAR_NAME$', + DEFAULT_VAR_MIN = '', + DEFAULT_VAR_MAX = '', + DEFAULT_SUFFIX = '', + DEFAULT_DIM_NAME_LONGITUDES = 'longitude', + DEFAULT_DIM_NAME_LATITUDES = 'latitude', + DEFAULT_DIM_NAME_MEMBERS = 'ensemble') + } + # We take the dimnames that haven't been explicitly specified from the + # configuration file. + # If the configuration file wasn't opened, we take the default values from + # the dictionary 'replace_values'. + dimnames <- list(lon = ifelse(is.null(dimnames[["lon"]]), + replace_values[["DEFAULT_DIM_NAME_LONGITUDES"]], + dimnames[['lon']]), + lat = ifelse(is.null(dimnames[["lat"]]), + replace_values[["DEFAULT_DIM_NAME_LATITUDES"]], + dimnames[['lat']]), + member = ifelse(is.null(dimnames[["member"]]), + replace_values[["DEFAULT_DIM_NAME_MEMBERS"]], + dimnames[['member']])) + if (!is.null(exp)) { + exp <- lapply(exp, function (x) { + if (!('dimnames' %in% names(x))) { + x[['dimnames']] <- dimnames + x + } else { + dimnames2 <- dimnames + dimnames2[names(x[['dimnames']])] <- x[['dimnames']] + x[['dimnames']] <- dimnames2 + x + } + }) + } + if (!is.null(obs)) { + obs <- lapply(obs, function (x) { + if (!('dimnames' %in% names(x))) { + x[['dimnames']] <- dimnames + x + } else { + dimnames2 <- dimnames + dimnames2[names(x[['dimnames']])] <- x[['dimnames']] + x[['dimnames']] <- dimnames2 + x + } + }) + } + single_dataset <- (length(obs) + length(exp) == 1) + + ## We add some predefined values in the dictionary. 
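+ # Hypothetical mini-example (not original code) with the same effect as the
+ # dimnames merging a few lines above: per-dataset entries override the
+ # defaults, everything else is kept.
+ # defaults  <- list(lon = 'longitude', lat = 'latitude', member = 'ensemble')
+ # overrides <- list(member = 'number')
+ # modifyList(defaults, overrides)
+ # # $lon "longitude"  $lat "latitude"  $member "number"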
+ replace_values[["VAR_NAME"]] <- var + replace_values[["STORE_FREQ"]] <- storefreq + + # Initialize some variables that will take various values along the + # execution + latitudes <- longitudes <- NULL + leadtimes <- NULL + var_exp <- var_obs <- NULL + units <- var_long_name <- NULL + is_2d_var <- FALSE + + # Start defining the dimensions of the output matrices + nmod <- length(exp) + nobs <- length(obs) + nsdates <- length(sdates) + + # We will iterate over all the experiments, start dates and members and will open + # the file pointed by the data in the configuration file. + # If a file is found, we will open it and read its metadata to work out the + # remaining dimensions: members, leadtimes, longitudes and latitudes. + # + # At each iteration we will build a 'work piece' that will contain information + # on the data we want to load from a file. For each file we will have one + # work piece. These work pieces will be packages of information to be sent to + # the various parallel processes. Each process will need this information to + # access and manipulate the data according to the output type and other + # parameters. + if (!silent) { + cat("* Fetching first experimental files to work out 'var_exp' size...\n") + } + + dataset_type <- 'exp' + dim_exp <- NULL + filename <- file_found <- tmp <- nltime <- NULL + dims2define <- TRUE + is_file_per_member_exp <- rep(nmod, FALSE) + exp_work_pieces <- list() + jmod <- 1 + + while (jmod <= nmod) { + tags_to_find <- c('MEMBER_NUMBER') + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][['path']], + replace_values[-position_of_tags], TRUE) + } else { + quasi_final_path <- .ConfigReplaceVariablesInString(exp[[jmod]][['path']], + replace_values, TRUE) + } + is_file_per_member_exp[jmod] <- grepl('$MEMBER_NUMBER$', + quasi_final_path, fixed = TRUE) + replace_values[["EXP_NAME"]] <- exp[[jmod]][['name']] + replace_values[["NC_VAR_NAME"]] <- exp[[jmod]][['nc_var_name']] + namevar <- .ConfigReplaceVariablesInString(exp[[jmod]][['nc_var_name']], replace_values) + replace_values[["SUFFIX"]] <- exp[[jmod]][['suffix']] + if (is.null(varmin)) { + mod_var_min <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][['var_min']], replace_values)) + } else { + mod_var_min <- varmin + } + if (is.null(varmax)) { + mod_var_max <- as.numeric(.ConfigReplaceVariablesInString(exp[[jmod]][['var_max']], replace_values)) + } else { + mod_var_max <- varmax + } + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + replace_values[["YEAR"]] <- substr(sdates[jsdate], 1, 4) + replace_values[["MONTH"]] <- substr(sdates[jsdate], 5, 6) + replace_values[["DAY"]] <- substr(sdates[jsdate], 7, 8) + # If the dimensions of the output matrices are still to define, we try to read + # the metadata of the data file that corresponds to the current iteration + if (dims2define) { + if (is_file_per_member_exp[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- '*' + } + # We must build a work piece that will be sent to the .LoadDataFile function + # in 'explore_dims' mode. We will obtain, if success, the dimensions of the + # data in the file. 
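+ # Schematic shape of such an exploration work piece (field names as used in
+ # this file; the concrete values are placeholders, not real paths):
+ # work_piece_example <- list(
+ #   dataset_type = 'exp',
+ #   filename = '/path/to/sfcWind_20130102.nc',  # pattern after substitution
+ #   namevar = 'sfcWind', grid = NULL, remap = 'remapcon', remapcells = 2,
+ #   is_file_per_member = FALSE, is_file_per_dataset = FALSE,
+ #   lon_limits = c(0, 360), lat_limits = c(-90, 90), single_dataset = TRUE)
+ # # With explore_dims = TRUE only dimensions/metadata are read; the actual
+ # # data values are fetched later with explore_dims = FALSE.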
+ work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(exp[[jmod]][['path']], replace_values), + namevar = namevar, grid = grid, remap = remap, remapcells = remapcells, + is_file_per_member = is_file_per_member_exp[jmod], + is_file_per_dataset = FALSE, + lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), dimnames = exp[[jmod]][['dimnames']], + single_dataset = single_dataset) + test="found_data" + #found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, silent = silent) + + explore_dims=TRUE + + # The purpose, working modes, inputs and outputs of this function are + # explained in ?LoadDataFile + #suppressPackageStartupMessages({library(ncdf4)}) + #suppressPackageStartupMessages({library(bigmemory)}) + #suppressPackageStartupMessages({library(plyr)}) + # Auxiliar function to convert array indices to lineal indices + arrayIndex2VectorIndex <- function(indices, dims) { + if (length(indices) > length(dims)) { + stop("Error: indices do not match dimensions in arrayIndex2VectorIndex.") + } + position <- 1 + dims <- rev(dims) + indices <- rev(indices) + for (i in 1:length(indices)) { + position <- position + (indices[i] - 1) * prod(dims[-c(1:i)]) + } + position + } + + .t2nlatlon <- function(t) { + ## As seen in cdo's griddes.c: ntr2nlat() + nlats <- (t * 3 + 1) / 2 + if ((nlats > 0) && (nlats - trunc(nlats) >= 0.5)) { + nlats <- ceiling(nlats) + } else { + nlats <- round(nlats) + } + if (nlats %% 2 > 0) { + nlats <- nlats + 1 + } + ## As seen in cdo's griddes.c: compNlon(), and as specified in ECMWF + nlons <- 2 * nlats + keep_going <- TRUE + while (keep_going) { + n <- nlons + if (n %% 8 == 0) n <- trunc(n / 8) + while (n %% 6 == 0) n <- trunc(n / 6) + while (n %% 5 == 0) n <- trunc(n / 5) + while (n %% 4 == 0) n <- trunc(n / 4) + while (n %% 3 == 0) n <- trunc(n / 3) + if (n %% 2 == 0) n <- trunc(n / 2) + if (n <= 8) { + keep_going <- FALSE + } else { + nlons <- nlons + 2 + if (nlons > 9999) { + stop("Error: pick another gaussian grid truncation. It doesn't fulfill the standards to apply FFT.") + } + } + } + c(nlats, nlons) + } + + .nlat2t <- function(nlats) { + trunc((nlats * 2 - 1) / 3) + } + + test="1" + found_file <- NULL + dims <- NULL + grid_name <- units <- var_long_name <- is_2d_var <- NULL + + filename <- work_piece[['filename']] + namevar <- work_piece[['namevar']] + output <- work_piece[['output']] + # The names of all data files in the directory of the repository that match + # the pattern are obtained. + if (length(grep("^http", filename)) > 0) { + is_url <- TRUE + files <- filename + ## TODO: Check that the user is not using shell globbing exps. + } else { + is_url <- FALSE + files <- Sys.glob(filename) + } + test="2" + # If we don't find any, we leave the flag 'found_file' with a NULL value. + if (length(files) > 0) { + # The first file that matches the pattern is chosen and read. + filename <- files[length(files)] + filein <- filename + found_file <- filename + mask <- work_piece[['mask']] + + if (!silent) { + if (explore_dims) { + cat(paste("* Exploring dimensions...", filename, '\n')) + } + ##} else { + ## cat(paste("* Reading & processing data...", filename, '\n')) + ##} + } + + # We will fill in 'expected_dims' with the names of the expected dimensions of + # the data array we'll retrieve from the file. 
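+ # Quick illustrative sanity checks for the two helpers defined above:
+ # arr <- array(1:24, dim = c(2, 3, 4))
+ # arrayIndex2VectorIndex(c(2, 3, 4), dim(arr))  # 24 (column-major, as in R)
+ # arr[2, 3, 4] == as.vector(arr)[24]            # TRUE
+ # .t2nlatlon(159)  # c(240, 480): a T159 gaussian grid, 240 lats x 480 lons
+ # .nlat2t(240)     # 159: the inverse mapping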
+ expected_dims <- NULL + remap_needed <- FALSE + # But first we open the file and work out whether the requested variable is 2d + fnc <- nc_open(filein) + if (!(namevar %in% names(fnc$var))) { + stop(paste("Error: The variable", namevar, "is not defined in the file", filename)) + } + var_long_name <- fnc$var[[namevar]]$longname + units <- fnc$var[[namevar]]$units + if (is.null(work_piece[['is_2d_var']])) { + is_2d_var <- all(c(work_piece[['dimnames']][['lon']], + work_piece[['dimnames']][['lat']]) %in% + unlist(lapply(fnc$var[[namevar]][['dim']], + '[[', 'name'))) + } else { + is_2d_var <- work_piece[['is_2d_var']] + } + if ((is_2d_var || work_piece[['is_file_per_dataset']]) && (Sys.which("cdo")[[1]] == "")) { + stop("Error: CDO libraries not available") + } + # If the variable to load is 2-d, we need to determine whether: + # - interpolation is needed + # - subsetting is requested + if (is_2d_var) { + ## We read the longitudes and latitudes from the file. + lon <- ncvar_get(fnc, work_piece[['dimnames']][['lon']]) + lat <- ncvar_get(fnc, work_piece[['dimnames']][['lat']]) + # If a common grid is requested or we are exploring the file dimensions + # we need to read the grid type and size of the file to finally work out the + # CDO grid name. + if (!is.null(work_piece[['grid']]) || explore_dims) { + # Here we read the grid type and its number of longitudes and latitudes + file_info <- system(paste('cdo -s griddes', filein, '2> /dev/null'), intern = TRUE) + grids_positions <- grep('# gridID', file_info) + grids_first_lines <- grids_positions + 2 + grids_last_lines <- c((grids_positions - 2)[-1], length(file_info)) + grids_info <- as.list(1:length(grids_positions)) + grids_info <- lapply(grids_info, function (x) file_info[grids_first_lines[x]:grids_last_lines[x]]) + grids_info <- lapply(grids_info, function (x) gsub(" *", " ", x)) + grids_info <- lapply(grids_info, function (x) gsub("^ | $", "", x)) + grids_info <- lapply(grids_info, function (x) unlist(strsplit(x, " | = "))) + grids_types <- unlist(lapply(grids_info, function (x) x[grep('gridtype', x) + 1])) + grids_matches <- unlist(lapply(grids_info, function (x) { + nlons <- if (length(grep('xsize', x)) > 0) { + as.integer(x[grep('xsize', x) + 1]) + } else { + NA + } + nlats <- if (length(grep('ysize', x)) > 0) { + as.integer(x[grep('ysize', x) + 1]) + } else { + NA + } + if (identical(nlons, length(lon)) && + identical(nlats, length(lat))) { + TRUE + } else { + FALSE + } + })) + grids_matches <- grids_matches[which(grids_types %in% c('gaussian', 'lonlat'))] + grids_info <- grids_info[which(grids_types %in% c('gaussian', 'lonlat'))] + grids_types <- grids_types[which(grids_types %in% c('gaussian', 'lonlat'))] + if (length(grids_matches) == 0) { + stop("Error: Only 'gaussian' and 'lonlat' grids supported. 
See e.g: cdo sinfo ", filename) + } + if (sum(grids_matches) > 1) { + if ((all(grids_types[which(grids_matches)] == 'gaussian') || + all(grids_types[which(grids_matches)] == 'lonlat')) && + all(unlist(lapply(grids_info[which(grids_matches)], identical, + grids_info[which(grids_matches)][[1]])))) { + grid_type <- grids_types[which(grids_matches)][1] + } else { + stop("Error: Load() can't disambiguate: More than one lonlat/gaussian grids with the same size as the requested variable defined in ", filename) + } + } else { + grid_type <- grids_types[which(grids_matches)] + } + grid_lons <- length(lon) + grid_lats <- length(lat) + # Convert to CDO grid name as seen in cdo's griddes.c: nlat2ntr() + if (grid_type == 'lonlat') { + grid_name <- paste0('r', grid_lons, 'x', grid_lats) + } else { + grid_name <- paste0('t', .nlat2t(grid_lats), 'grid') + } + } + # If a common grid is requested, we will also calculate its size which we will use + # later on. + if (!is.null(work_piece[['grid']])) { + # Now we calculate the common grid type and its lons and lats + if (length(grep('^t\\d{1,+}grid$', work_piece[['grid']])) > 0) { + common_grid_type <- 'gaussian' + common_grid_res <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,+}')[[1]][2]) + nlonlat <- .t2nlatlon(common_grid_res) + common_grid_lats <- nlonlat[1] + common_grid_lons <- nlonlat[2] + } else if (length(grep('^r\\d{1,+}x\\d{1,+}$', work_piece[['grid']])) > 0) { + common_grid_type <- 'lonlat' + common_grid_lons <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,+}')[[1]][2]) + common_grid_lats <- as.integer(strsplit(work_piece[['grid']], '[^0-9]{1,+}')[[1]][3]) + } else { + stop("Error: Only supported grid types in parameter 'grid' are tgrid and rx") + } + } else { + ## If no 'grid' is specified, there is no common grid. + ## But these variables are filled in for consistency in the code. + common_grid_lons <- length(lon) + common_grid_lats <- length(lat) + } + first_common_grid_lon <- 0 + last_common_grid_lon <- 360 - 360/common_grid_lons + ## This is not true for gaussian grids or for some regular grids, but + ## is a safe estimation + first_common_grid_lat <- -90 + last_common_grid_lat <- 90 + # And finally determine whether interpolation is needed or not + remove_shift <- FALSE + if (!is.null(work_piece[['grid']])) { + if ((grid_lons != common_grid_lons) || + (grid_lats != common_grid_lats) || + (grid_type != common_grid_type) || + ((lon[1] != first_common_grid_lon) + && !work_piece[['single_dataset']])) { + if (grid_lons == common_grid_lons && grid_lats == common_grid_lats && + grid_type == common_grid_type && lon[1] != first_common_grid_lon && + !work_piece[['single_dataset']]) { + remove_shift <- TRUE + } + remap_needed <- TRUE + common_grid_name <- work_piece[['grid']] + } + } else if ((lon[1] != first_common_grid_lon) && explore_dims && + !work_piece[['single_dataset']]) { + remap_needed <- TRUE + common_grid_name <- grid_name + remove_shift <- TRUE + } + if (remove_shift && !explore_dims) { + if (!is.null(work_piece[['progress_amount']])) { + cat("\n") + } + cat(paste0("! Warning: The dataset with index ", + tail(work_piece[['indices']], 1), " in '", + work_piece[['dataset_type']], "' doesn't start at longitude 0 and will be re-interpolated in order to align its longitudes with the standard CDO grids definable with the names 'tgrid' or 'rx', which are by definition starting at the longitude 0.\n")) + if (!is.null(mask)) { + cat(paste0("! 
Warning: A mask was provided for the dataset with index ", + tail(work_piece[['indices']], 1), " in '", + work_piece[['dataset_type']], "'. This dataset has been re-interpolated to align its longitudes to start at 0. You must re-interpolate the corresponding mask to align its longitudes to start at 0 as well, if you haven't done so yet. Running cdo remapcon,", common_grid_name, " original_mask_file.nc new_mask_file.nc will fix it.\n")) + } + } + # Now calculate if the user requests for a lonlat subset or for the + # entire field + lonmin <- work_piece[['lon_limits']][1] + lonmax <- work_piece[['lon_limits']][2] + latmin <- work_piece[['lat_limits']][1] + latmax <- work_piece[['lat_limits']][2] + lonlat_subsetting_requested <- FALSE + if (lonmin <= lonmax) { + if ((lonmin > first_common_grid_lon) || (lonmax < last_common_grid_lon)) { + lonlat_subsetting_requested <- TRUE + } + } else { + if ((lonmin - lonmax) > 360/common_grid_lons) { + lonlat_subsetting_requested <- TRUE + } else { + gap_width <- floor(lonmin / (360/common_grid_lons)) - + floor(lonmax / (360/common_grid_lons)) + if (gap_width > 0) { + if (!(gap_width == 1 && (lonmin %% (360/common_grid_lons) == 0) && + (lonmax %% (360/common_grid_lons) == 0))) { + lonlat_subsetting_requested <- TRUE + } + } + } + } + if ((latmin > first_common_grid_lat) || (latmax < last_common_grid_lat)) { + lonlat_subsetting_requested <- TRUE + } + test="10" + + # When remap is needed but no subsetting, the file is copied locally + # so that cdo works faster, and then interpolated. + # Otherwise the file is kept as is and the subset will have to be + # interpolated still. + if (!lonlat_subsetting_requested && remap_needed) { + nc_close(fnc) + filecopy <- tempfile(pattern = "load", fileext = ".nc") + file.copy(filein, filecopy) + filein <- tempfile(pattern = "loadRegridded", fileext = ".nc") + system(paste0("cdo -s ", work_piece[['remap']], ",", + common_grid_name, + " -selname,", namevar, " ", filecopy, " ", filein, + " 2>/dev/null", sep = "")) + file.remove(filecopy) + work_piece[['dimnames']][['lon']] <- 'lon' + work_piece[['dimnames']][['lat']] <- 'lat' + fnc <- nc_open(filein) + lon <- ncvar_get(fnc, work_piece[['dimnames']][['lon']]) + lat <- ncvar_get(fnc, work_piece[['dimnames']][['lat']]) + } + + # Read and check also the mask + if (!is.null(mask)) { + ###mask_file <- tempfile(pattern = 'loadMask', fileext = '.nc') + if (is.list(mask)) { + if (!file.exists(mask[['path']])) { + stop(paste("Error: Couldn't find the mask file", mask[['path']])) + } + mask_file <- mask[['path']] + ###file.copy(work_piece[['mask']][['path']], mask_file) + fnc_mask <- nc_open(mask_file) + vars_in_mask <- sapply(fnc_mask$var, '[[', 'name') + if ('nc_var_name' %in% names(mask)) { + if (!(mask[['nc_var_name']] %in% + vars_in_mask)) { + stop(paste("Error: couldn't find variable", mask[['nc_var_name']], + "in the mask file", mask[['path']])) + } + } else { + if (length(vars_in_mask) != 1) { + stop(paste("Error: one and only one non-coordinate variable should be defined in the mask file", + mask[['path']], "if the component 'nc_var_name' is not specified. 
Currently found: ", + paste(vars_in_mask, collapse = ', '), ".")) + } else { + mask[['nc_var_name']] <- vars_in_mask + } + } + if (sum(fnc_mask$var[[mask[['nc_var_name']]]]$size > 1) != 2) { + stop(paste0("Error: the variable '", + mask[['nc_var_name']], + "' must be defined only over the dimensions '", + work_piece[['dimnames']][['lon']], "' and '", + work_piece[['dimnames']][['lat']], + "' in the mask file ", + mask[['path']])) + } + mask <- ncvar_get(fnc_mask, mask[['nc_var_name']], collapse_degen = TRUE) + nc_close(fnc_mask) + ### mask_lon <- ncvar_get(fnc_mask, work_piece[['dimnames']][['lon']]) + ### mask_lat <- ncvar_get(fnc_mask, work_piece[['dimnames']][['lat']]) + ###} else { + ### dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", lon) + ### dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", lat) + ### ncdf_var <- ncvar_def('LSM', "", list(dim_longitudes, dim_latitudes), NA, 'double') + ### fnc_mask <- nc_create(mask_file, list(ncdf_var)) + ### ncvar_put(fnc_mask, ncdf_var, work_piece[['mask']]) + ### nc_close(fnc_mask) + ### fnc_mask <- nc_open(mask_file) + ### work_piece[['mask']] <- list(path = mask_file, nc_var_name = 'LSM') + ### mask_lon <- lon + ### mask_lat <- lat + ###} + ###} + ### Now ready to check that the mask is right + ##if (!(lonlat_subsetting_requested && remap_needed)) { + ### if ((dim(mask)[2] != length(lon)) || (dim(mask)[1] != length(lat))) { + ### stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + ### } + ###if (!(identical(mask_lon, lon) && identical(mask_lat, lat))) { + ### stop(paste0("Error: the longitudes and latitudes in the masks must be identical to the ones in the corresponding data files if output = 'areave' or, if the selected output is 'lon', 'lat' or 'lonlat', the longitudes in the mask file must start by 0 and the latitudes must be ordered from highest to lowest. See\n ", + ### work_piece[['mask']][['path']], " and ", filein)) + ###} + } + } +test='20' + lon_indices <- 1:length(lon) + if (!(lonlat_subsetting_requested && remap_needed)) { + lon[which(lon < 0)] <- lon[which(lon < 0)] + 360 + } + if (lonmax >= lonmin) { + lon_indices <- lon_indices[which(((lon %% 360) >= lonmin) & ((lon %% 360) <= lonmax))] + } else if (!remap_needed) { + lon_indices <- lon_indices[which(((lon %% 360) <= lonmax) | ((lon %% 360) >= lonmin))] + } + lat_indices <- which(lat >= latmin & lat <= latmax) + ## In most of the cases the latitudes are ordered from -90 to 90. + ## We will reorder them to be in the order from 90 to -90, so mostly + ## always the latitudes are reordered. + ## TODO: This could be avoided in future. + if (lat[1] < lat[length(lat)]) { + lat_indices <- lat_indices[length(lat_indices):1] + } + if (!is.null(mask) && !(lonlat_subsetting_requested && remap_needed)) { + if ((dim(mask)[1] != length(lon)) || (dim(mask)[2] != length(lat))) { + stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. 
It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + } + mask <- mask[lon_indices, lat_indices] + } + ## If the user requests subsetting, we must extend the lon and lat limits if possible + ## so that the interpolation after is done properly + maximum_extra_points <- work_piece[['remapcells']] + if (lonlat_subsetting_requested && remap_needed) { + if ((maximum_extra_points > (head(lon_indices, 1) - 1)) || + (maximum_extra_points > (length(lon) - tail(lon_indices, 1)))) { + ## if the requested number of points goes beyond the left or right + ## sides of the map, we need to take the entire map so that the + ## interpolation works properly + lon_indices <- 1:length(lon) + } else { + extra_points <- min(maximum_extra_points, head(lon_indices, 1) - 1) + if (extra_points > 0) { + lon_indices <- c((head(lon_indices, 1) - extra_points):(head(lon_indices, 1) - 1), lon_indices) + } + extra_points <- min(maximum_extra_points, length(lon) - tail(lon_indices, 1)) + if (extra_points > 0) { + lon_indices <- c(lon_indices, (tail(lon_indices, 1) + 1):(tail(lon_indices, 1) + extra_points)) + } + } + min_lat_ind <- min(lat_indices) + max_lat_ind <- max(lat_indices) + extra_points <- min(maximum_extra_points, min_lat_ind - 1) + if (extra_points > 0) { + if (lat[1] < tail(lat, 1)) { + lat_indices <- c(lat_indices, (min_lat_ind - 1):(min_lat_ind - extra_points)) + } else { + lat_indices <- c((min_lat_ind - extra_points):(min_lat_ind - 1), lat_indices) + } + } + extra_points <- min(maximum_extra_points, length(lat) - max_lat_ind) + if (extra_points > 0) { + if (lat[1] < tail(lat, 1)) { + lat_indices <- c((max_lat_ind + extra_points):(max_lat_ind + 1), lat_indices) + } else { + lat_indices <- c(lat_indices, (max_lat_ind + 1):(max_lat_ind + extra_points)) + } + } + } + lon <- lon[lon_indices] + lat <- lat[lat_indices] + expected_dims <- c(work_piece[['dimnames']][['lon']], + work_piece[['dimnames']][['lat']]) + } else { + lon <- 0 + lat <- 0 + } + # We keep on filling the expected dimensions + var_dimnames <- unlist(lapply(fnc$var[[namevar]][['dim']], '[[', 'name')) + nmemb <- nltime <- NULL + ## Sometimes CDO renames 'members' dimension to 'lev' + old_members_dimname <- NULL + if (('lev' %in% var_dimnames) && !(work_piece[['dimnames']][['member']] %in% var_dimnames)) { + old_members_dimname <- work_piece[['dimnames']][['member']] + work_piece[['dimnames']][['member']] <- 'lev' + } + if (work_piece[['dimnames']][['member']] %in% var_dimnames) { + nmemb <- fnc$var[[namevar]][['dim']][[match(work_piece[['dimnames']][['member']], var_dimnames)]]$len + expected_dims <- c(expected_dims, work_piece[['dimnames']][['member']]) + } else { + nmemb <- 1 + } + if (length(expected_dims) > 0) { + dim_matches <- match(expected_dims, var_dimnames) + if (any(is.na(dim_matches))) { + if (!is.null(old_members_dimname)) { + expected_dims[which(expected_dims == 'lev')] <- old_members_dimname + } + stop(paste("Error: the expected dimension(s)", + paste(expected_dims[which(is.na(dim_matches))], collapse = ', '), + "were not found in", filename)) + } + time_dimname <- var_dimnames[-dim_matches] + } else { + time_dimname <- var_dimnames + } + if (length(time_dimname) > 0) { + if (length(time_dimname) == 1) { + nltime <- 
fnc$var[[namevar]][['dim']][[match(time_dimname, var_dimnames)]]$len + expected_dims <- c(expected_dims, time_dimname) + dim_matches <- match(expected_dims, var_dimnames) + } else { + if (!is.null(old_members_dimname)) { + expected_dims[which(expected_dims == 'lev')] <- old_members_dimname + } + stop(paste("Error: the variable", namevar, + "is defined over more dimensions than the expected (", + paste(c(expected_dims, 'time'), collapse = ', '), + "). It could also be that the members dimension in 'dimnames' or in the configuration file is incorrect. If not, it could also be that the members dimension is named incorrectly. In that case, either rename the dimension in the file or adjust Load() to recognize this name with the parameter 'dimnames'. See file", filename)) + } + } else { + nltime <- 1 + } +test='30' + # Now we must retrieve the data from the file, but only the asked indices. + # So we build up the indices to retrieve. + # Longitudes or latitudes have been retrieved already. + if (explore_dims) { + # If we're exploring the file we only want one time step from one member, + # to regrid it and work out the number of longitudes and latitudes. + # We don't need more. + members <- 1 + ltimes_list <- list(c(1)) + } else { + # The data is arranged in the array 'tmp' with the dimensions in a + # common order: + # 1) Longitudes + # 2) Latitudes + # 3) Members (even if is not a file per member experiment) + # 4) Lead-times + if (work_piece[['is_file_per_dataset']]) { + time_indices <- 1:nltime + mons <- strsplit(system(paste('cdo showmon ', filein, + ' 2>/dev/null'), intern = TRUE), split = ' ') + years <- strsplit(system(paste('cdo showyear ', filein, + ' 2>/dev/null'), intern = TRUE), split = ' ') + mons <- as.integer(mons[[1]][which(mons[[1]] != "")]) + years <- as.integer(years[[1]][which(years[[1]] != "")]) + time_indices <- ts(time_indices, start = c(years[1], mons[1]), + end = c(years[length(years)], mons[length(mons)]), + frequency = 12) + ltimes_list <- list() + for (sdate in work_piece[['startdates']]) { + selected_time_indices <- window(time_indices, start = c(as.integer( + substr(sdate, 1, 4)), as.integer(substr(sdate, 5, 6))), + end = c(3000, 12), frequency = 12, extend = TRUE) + selected_time_indices <- selected_time_indices[work_piece[['leadtimes']]] + ltimes_list <- c(ltimes_list, list(selected_time_indices)) + } + } else { + ltimes <- work_piece[['leadtimes']] + #if (work_piece[['dataset_type']] == 'exp') { + ltimes_list <- list(ltimes[which(ltimes <= nltime)]) + #} + } + ## TODO: Put, when reading matrices, this kind of warnings + # if (nmember < nmemb) { + # cat("Warning: + members <- 1:work_piece[['nmember']] + members <- members[which(members <= nmemb)] + } +test='35' + # Now, for each list of leadtimes to load (usually only one list with all leadtimes), + # we'll join the indices and retrieve data + found_disordered_dims <- FALSE + for (ltimes in ltimes_list) { + if (is_2d_var) { + start <- c(min(lon_indices), min(lat_indices)) + end <- c(max(lon_indices), max(lat_indices)) + if (lonlat_subsetting_requested && remap_needed) { + subset_indices <- list(min(lon_indices):max(lon_indices) - min(lon_indices) + 1, + lat_indices - min(lat_indices) + 1) + + + dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", lon) + dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", lat) + ncdf_dims <- list(dim_longitudes, dim_latitudes) + } else { + subset_indices <- list(lon_indices - min(lon_indices) + 1, + lat_indices - 
min(lat_indices) + 1) + + + ncdf_dims <- list() + } + final_dims <- c(length(subset_indices[[1]]), length(subset_indices[[2]]), 1, 1) + } else { + start <- end <- c() + subset_indices <- list() + ncdf_dims <- list() + final_dims <- c(1, 1, 1, 1) + } + test='36' + if (work_piece[['dimnames']][['member']] %in% expected_dims) { + start <- c(start, head(members, 1)) + end <- c(end, tail(members, 1)) + subset_indices <- c(subset_indices, list(members - head(members, 1) + 1)) + dim_members <- ncdim_def(work_piece[['dimnames']][['member']], "", members) + ncdf_dims <- c(ncdf_dims, list(dim_members)) + final_dims[3] <- length(members) + } + if (time_dimname %in% expected_dims) { + if (any(!is.na(ltimes))) { + start <- c(start, head(ltimes[which(!is.na(ltimes))], 1)) + end <- c(end, tail(ltimes[which(!is.na(ltimes))], 1)) + subset_indices <- c(subset_indices, list(ltimes - head(ltimes[which(!is.na(ltimes))], 1) + 1)) + } else { + start <- c(start, NA) + end <- c(end, NA) + subset_indices <- c(subset_indices, list(ltimes)) + } + dim_time <- ncdim_def(time_dimname, "", 1:length(ltimes), unlim = TRUE) + ncdf_dims <- c(ncdf_dims, list(dim_time)) + final_dims[4] <- length(ltimes) + } + count <- end - start + 1 + start <- start[dim_matches] + count <- count[dim_matches] + + subset_indices <- subset_indices[dim_matches] + + test='37' + # Now that we have the indices to retrieve, we retrieve the data + if (prod(final_dims) > 0) { + test='370' + + + tmp <- take(ncvar_get(fnc, namevar, start, count, + collapse_degen = FALSE), + 1:length(subset_indices), subset_indices) + test='371' + + #write.table(tmp, file=paste0(workdir,'/tmp',tmp,'.txt')) + + + # The data is regridded if it corresponds to an atmospheric variable. When + # the chosen output type is 'areave' the data is not regridded to not + # waste computing time unless the user specified a common grid. + if (is_2d_var) { + ###if (!is.null(work_piece[['mask']]) && !(lonlat_subsetting_requested && remap_needed)) { + ### mask <- take(ncvar_get(fnc_mask, work_piece[['mask']][['nc_var_name']], + ### start[dim_matches[1:2]], count[dim_matches[1:2]], + ### collapse_degen = FALSE), 1:2, subset_indices[dim_matches[1:2]]) + ###} + if (lonlat_subsetting_requested && remap_needed) { + filein <- tempfile(pattern = "loadRegridded", fileext = ".nc") + filein2 <- tempfile(pattern = "loadRegridded2", fileext = ".nc") + ncdf_var <- ncvar_def(namevar, "", ncdf_dims[dim_matches], + fnc$var[[namevar]]$missval, + prec = if (fnc$var[[namevar]]$prec == 'int') { + 'integer' + } else { + fnc$var[[namevar]]$prec + }) + test='372' + + nc_close(fnc) + fnc <- nc_create(filein2, list(ncdf_var)) + ncvar_put(fnc, ncdf_var, tmp) + nc_close(fnc) + test='375' + system(paste0("cdo -s -sellonlatbox,", if (lonmin > lonmax) { + "0,360," + } else { + paste0(lonmin, ",", lonmax, ",") + }, latmin, ",", latmax, + " -", work_piece[['remap']], ",", common_grid_name, + " ", filein2, " ", filein, " 2>/dev/null", sep = "")) + file.remove(filein2) + fnc <- nc_open(filein) + lon <- ncvar_get(fnc, 'lon') + lat <- ncvar_get(fnc, 'lat') + test='373' + + ## We read the longitudes and latitudes from the file. 
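+        ## (A hedged illustration of the wrapping applied a few lines below,
+        ##  with made-up values: lon <- c(-30, -15, 0, 15, 30) becomes
+        ##  c(330, 345, 0, 15, 30) after adding 360 to the negative entries,
+        ##  and a requested box with lonmin = 330 and lonmax = 30, i.e. one
+        ##  crossing Greenwich, keeps the indices where lon <= 30 or lon >= 330.)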
+ ## In principle cdo should put in order the longitudes + ## and slice them properly unless data is across greenwich + lon[which(lon < 0)] <- lon[which(lon < 0)] + 360 + lon_indices <- 1:length(lon) + if (lonmax < lonmin) { + lon_indices <- lon_indices[which((lon <= lonmax) | (lon >= lonmin))] + } + lat_indices <- 1:length(lat) + ## In principle cdo should put in order the latitudes + if (lat[1] < lat[length(lat)]) { + lat_indices <- length(lat):1 + } + final_dims[c(1, 2)] <- c(length(lon_indices), length(lat_indices)) + subset_indices[[dim_matches[1]]] <- lon_indices + subset_indices[[dim_matches[2]]] <- lat_indices + + tmp <- take(ncvar_get(fnc, namevar, collapse_degen = FALSE), + 1:length(subset_indices), subset_indices) +test='38' + if (!is.null(mask)) { + ## We create a very simple 2d netcdf file that is then interpolated to the common + ## grid to know what are the lons and lats of our slice of data + mask_file <- tempfile(pattern = 'loadMask', fileext = '.nc') + mask_file_remap <- tempfile(pattern = 'loadMask', fileext = '.nc') + dim_longitudes <- ncdim_def(work_piece[['dimnames']][['lon']], "degrees_east", c(0, 360)) + dim_latitudes <- ncdim_def(work_piece[['dimnames']][['lat']], "degrees_north", c(-90, 90)) + ncdf_var <- ncvar_def('LSM', "", list(dim_longitudes, dim_latitudes), NA, 'double') + fnc_mask <- nc_create(mask_file, list(ncdf_var)) + ncvar_put(fnc_mask, ncdf_var, array(rep(0, 4), dim = c(2, 2))) + nc_close(fnc_mask) + system(paste0("cdo -s ", work_piece[['remap']], ",", common_grid_name, + " ", mask_file, " ", mask_file_remap, " 2>/dev/null", sep = "")) + fnc_mask <- nc_open(mask_file_remap) + mask_lons <- ncvar_get(fnc_mask, 'lon') + mask_lats <- ncvar_get(fnc_mask, 'lat') + nc_close(fnc_mask) + file.remove(mask_file, mask_file_remap) + if ((dim(mask)[1] != common_grid_lons) || (dim(mask)[2] != common_grid_lats)) { + stop(paste("Error: the mask of the dataset with index ", tail(work_piece[['indices']], 1), " in '", work_piece[['dataset_type']], "' is wrong. It must be on the common grid if the selected output type is 'lonlat', 'lon' or 'lat', or 'areave' and 'grid' has been specified. It must be on the grid of the corresponding dataset if the selected output type is 'areave' and no 'grid' has been specified. 
For more information check ?Load and see help on parameters 'grid', 'maskmod' and 'maskobs'.", sep = "")) + } + mask_lons[which(mask_lons < 0)] <- mask_lons[which(mask_lons < 0)] + 360 + if (lonmax >= lonmin) { + mask_lon_indices <- which((mask_lons >= lonmin) & (mask_lons <= lonmax)) + } else { + mask_lon_indices <- which((mask_lons >= lonmin) | (mask_lons <= lonmax)) + } + mask_lat_indices <- which((mask_lats >= latmin) & (mask_lats <= latmax)) + if (lat[1] < lat[length(lat)]) { + mask_lat_indices <- mask_lat_indices[length(mask_lat_indices):1] + } + mask <- mask[mask_lon_indices, mask_lat_indices] + } + lon <- lon[lon_indices] + lat <- lat[lat_indices] + ### nc_close(fnc_mask) + ### system(paste0("cdo -s -sellonlatbox,", if (lonmin > lonmax) { + ### "0,360," + ### } else { + ### paste0(lonmin, ",", lonmax, ",") + ### }, latmin, ",", latmax, + ### " -", work_piece[['remap']], ",", common_grid_name, + ###This is wrong: same files + ### " ", mask_file, " ", mask_file, " 2>/dev/null", sep = "")) + ### fnc_mask <- nc_open(mask_file) + ### mask <- take(ncvar_get(fnc_mask, work_piece[['mask']][['nc_var_name']], + ### collapse_degen = FALSE), 1:2, subset_indices[dim_matches[1:2]]) + ###} + } + } + if (!all(dim_matches == sort(dim_matches))) { + if (!found_disordered_dims && rev(work_piece[['indices']])[2] == 1 && rev(work_piece[['indices']])[3] == 1) { + found_disordered_dims <- TRUE + cat(paste0("! Warning: the dimensions for the variable ", namevar, " in the files of the experiment with index ", tail(work_piece[['indices']], 1), " are not in the optimal order for loading with Load(). The optimal order would be '", paste(expected_dims, collapse = ', '), "'. One of the files of the dataset is stored in ", filename)) + } + tmp <- aperm(tmp, dim_matches) + } + dim(tmp) <- final_dims + # If we are exploring the file we don't need to process and arrange + # the retrieved data. We only need to keep the dimension sizes. + if (explore_dims) { + if (work_piece[['is_file_per_member']]) { + ## TODO: When the exp_full_path contains asterisks and is file_per_member + ## members from different datasets may be accounted. + ## Also if one file member is missing the accounting will be wrong. + ## Should parse the file name and extract number of members. + if (is_url) { + nmemb <- NULL + } else { + nmemb <- length(files) + } + } + dims <- list(member = nmemb, time = nltime, lon = lon, lat = lat) + } else { + test='39' + # If we are not exploring, then we have to process the retrieved data + if (is_2d_var) { + tmp <- apply(tmp, c(3, 4), function(x) { + # Disable of large values. 
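+          # (A minimal illustration of the clipping below, with invented numbers:
+          #  with work_piece[['var_limits']] == c(0, 100), a slice
+          #  x <- c(-5, 50, 120) is turned into c(NA, 50, NA), and any grid point
+          #  where the interpolated land-sea mask is below 0.5 is also set to NA
+          #  before the weighting and averaging steps.)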
+          if (!is.na(work_piece[['var_limits']][2])) {
+            x[which(x > work_piece[['var_limits']][2])] <- NA
+          }
+          if (!is.na(work_piece[['var_limits']][1])) {
+            x[which(x < work_piece[['var_limits']][1])] <- NA
+          }
+          if (!is.null(mask)) {
+            x[which(mask < 0.5)] <- NA
+          }
+
+          if (output == 'areave' || output == 'lon') {
+            weights <- InsertDim(cos(lat * pi / 180), 1, length(lon))
+            weights[which(is.na(x))] <- NA
+            if (output == 'areave') {
+              weights <- weights / mean(weights, na.rm = TRUE)
+              mean(x * weights, na.rm = TRUE)
+            } else {
+              weights <- weights / InsertDim(Mean1Dim(weights, 2, narm = TRUE), 2, length(lat))
+              Mean1Dim(x * weights, 2, narm = TRUE)
+            }
+          } else if (output == 'lat') {
+            Mean1Dim(x, 1, narm = TRUE)
+          } else if (output == 'lonlat') {
+            signif(x, 5)
+          }
+        })
+        if (output == 'areave') {
+          dim(tmp) <- c(1, 1, final_dims[3:4])
+        } else if (output == 'lon') {
+          dim(tmp) <- c(final_dims[1], 1, final_dims[3:4])
+        } else if (output == 'lat') {
+          dim(tmp) <- c(1, final_dims[c(2, 3, 4)])
+        } else if (output == 'lonlat') {
+          dim(tmp) <- final_dims
+        }
+      }
+      var_data <- attach.big.matrix(work_piece[['out_pointer']])
+      if (work_piece[['dims']][['member']] > 1 && nmemb > 1 &&
+          work_piece[['dims']][['time']] > 1 &&
+          nltime < work_piece[['dims']][['time']]) {
+        work_piece[['indices']][2] <- work_piece[['indices']][2] - 1
+        for (jmemb in members) {
+          work_piece[['indices']][2] <- work_piece[['indices']][2] + 1
+          out_position <- arrayIndex2VectorIndex(work_piece[['indices']], work_piece[['dims']])
+          out_indices <- out_position:(out_position + length(tmp[, , jmemb, ]) - 1)
+          var_data[out_indices] <- as.vector(tmp[, , jmemb, ])
+        }
+        work_piece[['indices']][2] <- work_piece[['indices']][2] - tail(members, 1) + 1
+      } else {
+        out_position <- arrayIndex2VectorIndex(work_piece[['indices']], work_piece[['dims']])
+        out_indices <- out_position:(out_position + length(tmp) - 1)
+        # rearrange from (lon, lat, member, time) to (lon, lat, time, member) before storing
+        var_data[out_indices] <- as.vector(aperm(tmp, c(1, 2, 4, 3)))
+      }
+      work_piece[['indices']][3] <- work_piece[['indices']][3] + 1
+    }
+  }
+  }
+  nc_close(fnc)
+  if (is_2d_var && remap_needed) {
+    file.remove(filein)
+    ###if (!is.null(mask) && lonlat_subsetting_requested) {
+    ###  file.remove(mask_file)
+    ###}
+  }
+test='40'
+  }
+  if (explore_dims) {
+    found_data <- list(dims = dims, is_2d_var = is_2d_var, grid = grid_name, units = units,
+                       var_long_name = var_long_name)
+  } else {
+    ###if (!silent && !is.null(progress_connection) && !is.null(work_piece[['progress_amount']])) {
+    ###  foobar <- writeBin(work_piece[['progress_amount']], progress_connection)
+    ###}
+    if (!silent && !is.null(work_piece[['progress_amount']])) {
+      cat(paste0(work_piece[['progress_amount']]))
+    }
+    found_file
+  }
+
+          found_dims <- found_data$dims
+          var_long_name <- found_data$var_long_name
+          units <- found_data$units
+          if (!is.null(found_dims)) {
+            is_2d_var <- found_data$is_2d_var
+            if (!is_2d_var && (output != 'areave')) {
+              cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n",
+                        sep = ''))
+              output <- 'areave'
+            }
+            if (output != 'areave' && is.null(grid)) {
+              grid <- found_data$grid
+            }
+            if (is.null(nmember)) {
+              if (is.null(found_dims[['member']])) {
+                cat("! Warning: loading data from a server but 'nmember' not specified. 
Loading only one member.\n") + nmember <- rep(1, nmod) + } else { + nmember <- rep(found_dims[['member']], nmod) + } + } + + if (is.null(nleadtime)) { + nleadtime <- found_dims[['time']] + } + if (is.null(leadtimemax)) { + leadtimemax <- nleadtime + } else if (leadtimemax > nleadtime) { + stop("Error: 'leadtimemax' argument is greater than the number of loaded leadtimes. Put first the experiment with the greatest number of leadtimes or adjust properly the parameters 'nleadtime' and 'leadtimemax'.") + } + + leadtimes <- seq(leadtimemin, leadtimemax, sampleperiod) + latitudes <- found_dims[['lat']] + longitudes <- found_dims[['lon']] + + if (output == 'lon' || output == 'lonlat') { + dim_exp[['lon']] <- length(longitudes) + } + if (output == 'lat' || output == 'lonlat') { + dim_exp[['lat']] <- length(latitudes) + } + dim_exp[['time']] <- length(leadtimes) + dim_exp[['member']] <- max(nmember) + dim_exp[['sdate']] <- nsdates + dim_exp[['dataset']] <- nmod + dims2define <- FALSE + } + } + + # We keep on iterating through members to build all the work pieces. + if (is_file_per_member_exp[jmod]) { + jmember <- 1 + while (jmember <= nmember[jmod]) { + replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", (nmember[jmod] %/% 10) + 1, "i", sep = ''), jmember - 1) + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][['path']], replace_values), + namevar = namevar, indices = c(1, jmember, jsdate, jmod), + nmember = nmember[jmod], leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][['dimnames']], + var_limits = c(mod_var_min, mod_var_max), remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + jmember <- jmember + 1 + } + } else { + work_piece <- list(filename = .ConfigReplaceVariablesInString(exp[[jmod]][['path']], replace_values), + namevar = namevar, indices = c(1, 1, jsdate, jmod), + nmember = nmember[jmod], leadtimes = leadtimes, mask = maskmod[[jmod]], + is_file_per_dataset = FALSE, dimnames = exp[[jmod]][['dimnames']], + var_limits = c(mod_var_min, mod_var_max), remapcells = remapcells) + exp_work_pieces <- c(exp_work_pieces, list(work_piece)) + } + jsdate <- jsdate + 1 + } + jmod <- jmod + 1 + } + if (dims2define && length(exp) > 0) { + cat("! Warning: no data found in file system for any experimental dataset.\n") + } + + dims <- dim_exp[na.omit(match(c('dataset', 'member', 'sdate', 'time', 'lat', 'lon'), names(dim_exp)))] + if (is.null(dims[['member']]) || any(is.na(unlist(dims))) || any(unlist(dims) == 0)) { + dims <- 0 + dim_exp <- NULL + } + if (!silent) { + message <- "* Success. Detected dimensions of experimental data: " + cat(paste0(message, paste(unlist(dims), collapse = ', '), '\n')) + cat("* Fetching first observational files to work out 'var_obs' size...\n") + } + + + # If there are no experiments to load we need to choose a number of time steps + # to load from observational datasets. We load from the first start date to + # the current date. + if (is.null(exp) || dims == 0) { + if (is.null(leadtimemax)) { + cat("! Warning: loading observations only and no 'leadtimemax' specified. 
Data will be loaded from each starting date to current time.\n") + diff <- Sys.time() - as.POSIXct(paste(substr(sdates[1], 1, 4), '-', + substr(sdates[1], 5, 6), '-', substr(sdates[1], 7, 8), sep='')) + if (storefreq == 'monthly') { + leadtimemax <- as.integer(diff/30) + } else { + leadtimemax <- as.integer(diff) + } + } + if (is.null(nleadtime)) { + nleadtime <- leadtimemax + } + leadtimes <- seq(leadtimemin, leadtimemax, sampleperiod) + } + + # Now we start iterating over observations. We try to find the output matrix + # dimensions and we build anyway the work pieces corresponding to the observational + # data that time-corresponds the experimental data or the time-steps until the + # current date if no experimental datasets were specified. + dataset_type <- 'obs' + dim_obs <- NULL + dims2define <- TRUE + lat_indices <- lon_indices <- NULL + obs_work_pieces <- list() + is_file_per_dataset_obs <- rep(FALSE, nobs) + is_file_per_member_obs <- rep(FALSE, nobs) + jobs <- 1 + while (jobs <= nobs) { + tags_to_find <- c('MONTH', 'DAY', 'YEAR', 'MEMBER_NUMBER') + position_of_tags <- na.omit(match(tags_to_find, names(replace_values))) + if (length(position_of_tags) > 0) { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][['path']], + replace_values[-position_of_tags], TRUE) + } else { + quasi_final_path <- .ConfigReplaceVariablesInString(obs[[jobs]][['path']], + replace_values, TRUE) + } + is_file_per_dataset_obs[jobs] <- !any(sapply(c("$MONTH$", "$DAY$", "$YEAR$"), + grepl, quasi_final_path, fixed = TRUE)) + is_file_per_member_obs[jobs] <- grepl("$MEMBER_NUMBER$", quasi_final_path, fixed = TRUE) + replace_values[["OBS_NAME"]] <- obs[[jobs]][['name']] + replace_values[["NC_VAR_NAME"]] <- obs[[jobs]][['nc_var_name']] + namevar <- .ConfigReplaceVariablesInString(obs[[jobs]][['nc_var_name']], replace_values) + replace_values[["SUFFIX"]] <- obs[[jobs]][['suffix']] + if (is.null(varmin)) { + obs_var_min <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][['var_min']], replace_values)) + } else { + obs_var_min <- varmin + } + if (is.null(varmax)) { + obs_var_max <- as.numeric(.ConfigReplaceVariablesInString(obs[[jobs]][['var_max']], replace_values)) + } else { + obs_var_max <- varmax + } + # This file format (file per whole dataset) is only supported in observations. + # However a file per whole dataset experiment could be seen as a file per + # member/ensemble experiment with a single start date, so still loadable. + # Nonetheless file per whole dataset observational files do not need to contain + # a year and month in the filename, the time correspondance relies on the + # month and years associated to each timestep inside the NetCDF file. + # So file per whole dataset experiments need to have a start date in the filename. + if (is_file_per_dataset_obs[jobs]) { + ## TODO: Open file-per-dataset-files only once. 
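+    ## (Illustrative, hypothetical path patterns, not taken from any real
+    ##  configuration: a file-per-dataset observation carries no
+    ##  $YEAR$/$MONTH$/$DAY$ tags, e.g.
+    ##    /path/to/obs/$VAR_NAME$_allyears.nc
+    ##  while a file-per-month dataset would look like
+    ##    /path/to/obs/$YEAR$$MONTH$/$VAR_NAME$_$YEAR$$MONTH$.nc
+    ##  hence the grepl() checks above flag only the former as
+    ##  file-per-dataset.)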
+ if (dims2define) { + work_piece <- list(dataset_type = dataset_type, + filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values), + namevar = namevar, grid = grid, remap = remap, remapcells = remapcells, + is_file_per_member = is_file_per_member_obs[jobs], + is_file_per_dataset = is_file_per_dataset_obs[jobs], + lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), dimnames = obs[[jobs]][['dimnames']], + single_dataset = single_dataset) + found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, silent = silent) + found_dims <- found_data$dims + var_long_name <- found_data$var_long_name + units <- found_data$units + if (!is.null(found_dims)) { + is_2d_var <- found_data$is_2d_var + if (!is_2d_var && (output != 'areave')) { + cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n", + sep = '')) + output <- 'areave' + } + if (output != 'areave' && is.null(grid)) { + grid <- found_data$grid + } + if (is.null(nmemberobs)) { + if (is.null(found_dims[['member']])) { + cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. Loading only one member.\n") + nmemberobs <- rep(1, nobs) + } else { + nmemberobs <- rep(found_dims[['member']], nobs) + } + } + if (is.null(dim_exp)) { + longitudes <- found_dims[['lon']] + latitudes <- found_dims[['lat']] + } + + if (output == 'lon' || output == 'lonlat') { + dim_obs[['lon']] <- length(longitudes) + } + if (output == 'lat' || output == 'lonlat') { + dim_obs[['lat']] <- length(latitudes) + } + dim_obs[['time']] <- length(leadtimes) + dim_obs[['member']] <- max(nmemberobs) + dim_obs[['sdate']] <- nsdates + dim_obs[['dataset']] <- nobs + dims2define <- FALSE + } + } + work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values), + namevar = namevar, indices = c(1, 1, 1, jobs), + nmember = nmemberobs[jobs], + mask = maskobs[[jobs]], leadtimes = leadtimes, + is_file_per_dataset = is_file_per_dataset_obs[jobs], + startdates = sdates, dimnames = obs[[jobs]][['dimnames']], + var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells) + obs_work_pieces <- c(obs_work_pieces, list(work_piece)) + } else { + jsdate <- 1 + while (jsdate <= nsdates) { + replace_values[["START_DATE"]] <- sdates[jsdate] + sdate <- sdates[jsdate] + + if (storefreq == 'daily') { + day <- substr(sdate, 7, 8) + if (day == '') { + day <- '01' + } + day <- as.integer(day) + startdate <- as.POSIXct(paste(substr(sdate, 1, 4), '-', + substr(sdate, 5, 6), '-', day, ' 12:00:00', sep = '')) + + (leadtimemin - 1) * 86400 + year <- as.integer(substr(startdate, 1, 4)) + month <- as.integer(substr(startdate, 6, 7)) + } else { + month <- (as.integer(substr(sdate, 5, 6)) + leadtimemin - 2) %% 12 + 1 + year <- as.integer(substr(sdate, 1, 4)) + (as.integer(substr(sdate, + 5, 6)) + leadtimemin - 2) %/% 12 + } + jleadtime <- 1 + while (jleadtime <= length(leadtimes)) { + replace_values[["YEAR"]] <- paste(year, '', sep = '') + replace_values[["MONTH"]] <- sprintf("%2.2i", month) + if (storefreq == 'daily') { + replace_values[["DAY"]] <- sprintf("%2.2i", day) + days_in_month <- ifelse(LeapYear(year), 29, 28) + days_in_month <- switch(paste(month, '', sep = ''), '1' = 31, + '3' = 31, '4' = 30, '5' = 31, '6' = 30, + '7' = 31, '8' = 31, '9' = 30, '10' = 31, + '11' = 30, '12' = 31, days_in_month) + ## This condition must be fulfilled to put all the month time steps + ## in the dimension of length nleadtimes. 
Otherwise it must be cut:
+              #(length(leadtimes) - 1)*sampleperiod + 1 - (jleadtime - 1)*sampleperiod >= days_in_month - day + 1
+              obs_file_indices <- seq(day, min(days_in_month, (length(leadtimes) - jleadtime) * sampleperiod + day), sampleperiod)
+            } else {
+              obs_file_indices <- 1
+            }
+            if (dims2define) {
+              if (is_file_per_member_obs[jobs]) {
+                replace_values[["MEMBER_NUMBER"]] <- '*'
+              }
+              work_piece <- list(dataset_type = dataset_type,
+                                 filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values),
+                                 namevar = namevar, grid = grid, remap = remap, remapcells = remapcells,
+                                 is_file_per_member = is_file_per_member_obs[jobs],
+                                 is_file_per_dataset = is_file_per_dataset_obs[jobs],
+                                 lon_limits = c(lonmin, lonmax),
+                                 lat_limits = c(latmin, latmax),
+                                 dimnames = obs[[jobs]][['dimnames']], single_dataset = single_dataset)
+              found_data <- .LoadDataFile(work_piece, explore_dims = TRUE, silent = silent)
+              found_dims <- found_data$dims
+              var_long_name <- found_data$var_long_name
+              units <- found_data$units
+              if (!is.null(found_dims)) {
+                is_2d_var <- found_data$is_2d_var
+                if (!is_2d_var && (output != 'areave')) {
+                  cat(paste("! Warning: '", output, "' output format not allowed when loading global mean variables. Forcing to 'areave'.\n",
+                            sep = ''))
+                  output <- 'areave'
+                }
+                if (output != 'areave' && is.null(grid)) {
+                  grid <- found_data$grid
+                }
+                if (is.null(nmemberobs)) {
+                  if (is.null(found_dims[['member']])) {
+                    cat("! Warning: loading observational data from a server but 'nmemberobs' not specified. Loading only one member.\n")
+                    nmemberobs <- rep(1, nobs)
+                  } else {
+                    nmemberobs <- rep(found_dims[['member']], nobs)
+                  }
+                }
+                if (is.null(dim_exp)) {
+                  longitudes <- found_dims[['lon']]
+                  latitudes <- found_dims[['lat']]
+                }
+
+                if (output == 'lon' || output == 'lonlat') {
+                  dim_obs[['lon']] <- length(longitudes)
+                }
+                if (output == 'lat' || output == 'lonlat') {
+                  dim_obs[['lat']] <- length(latitudes)
+                }
+                dim_obs[['time']] <- length(leadtimes)
+                dim_obs[['member']] <- max(nmemberobs)
+                dim_obs[['sdate']] <- nsdates
+                dim_obs[['dataset']] <- nobs
+                dims2define <- FALSE
+              }
+            }
+            if (is_file_per_member_obs[jobs]) {
+              jmember <- 1
+              while (jmember <= nmemberobs[jobs]) {
+                replace_values[["MEMBER_NUMBER"]] <- sprintf(paste("%.", (nmemberobs[jobs] %/% 10) + 1, "i", sep = ''), jmember - 1)
+                work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values),
+                                   namevar = namevar, indices = c(jleadtime, jmember, jsdate, jobs),
+                                   nmember = nmemberobs[jobs], leadtimes = obs_file_indices,
+                                   mask = maskobs[[jobs]], dimnames = obs[[jobs]][['dimnames']],
+                                   is_file_per_dataset = is_file_per_dataset_obs[jobs],
+                                   var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells)
+                obs_work_pieces <- c(obs_work_pieces, list(work_piece))
+                jmember <- jmember + 1
+              }
+            } else {
+              work_piece <- list(filename = .ConfigReplaceVariablesInString(obs[[jobs]][['path']], replace_values),
+                                 namevar = namevar, indices = c(jleadtime, 1, jsdate, jobs),
+                                 nmember = nmemberobs[jobs], leadtimes = obs_file_indices,
+                                 mask = maskobs[[jobs]], dimnames = obs[[jobs]][['dimnames']],
+                                 is_file_per_dataset = is_file_per_dataset_obs[jobs],
+                                 var_limits = c(obs_var_min, obs_var_max), remapcells = remapcells) # 'remapcells' was passed unnamed here, unlike everywhere else; named so that work_piece[['remapcells']] can be found later
+              obs_work_pieces <- c(obs_work_pieces, list(work_piece))
+            }
+
+            if (storefreq == 'daily') {
+              startdate <- startdate + 86400 * sampleperiod * length(obs_file_indices)
+              year <- as.integer(substr(startdate, 1, 4))
+              month <- as.integer(substr(startdate, 6, 7))
+              day <- 
as.integer(substr(startdate, 9, 10)) + } else { + month <- month + sampleperiod + year <- year + (month - 1) %/% 12 + month <- (month - 1) %% 12 + 1 + } + jleadtime <- jleadtime + length(obs_file_indices) + } + + jsdate <- jsdate + 1 + } + } + jobs <- jobs + 1 + } + if (dims2define && length(obs) > 0) { + cat("! Warning: no data found in file system for any observational dataset.\n") + } + dims <- dim_obs[na.omit(match(c('dataset', 'member', 'sdate', 'time', 'lat', 'lon'), names(dim_obs)))] + if (is.null(dims[['member']]) || any(is.na(unlist(dims))) || any(unlist(dims) == 0)) { + dims <- 0 + dim_obs <- NULL + } + if (!silent) { + message <- "* Success. Detected dimensions of observational data: " + cat(paste0(message, paste(unlist(dims), collapse = ', '), '\n')) + } + + if (!(is.null(dim_obs) && is.null(dim_exp))) { + + # We build two matrices in shared memory for the parallel processes to + # store their results + # These matrices will contain data arranged with the following + # dimension order, to maintain data spacial locality during the + # parallel fetch: + # longitudes, latitudes, leadtimes, members, startdates, nmod/nobs + # So [1, 1, 1, 1, 1, 1] will be next to [2, 1, 1, 1, 1, 1] in memory + pointer_var_exp <- pointer_var_obs <- NULL + if (!is.null(dim_exp) && (length(unlist(dim_exp)) == length(dim_exp)) && + !any(is.na(unlist(dim_exp))) && !any(unlist(dim_exp) == 0)) { + var_exp <- big.matrix(nrow = prod(unlist(dim_exp)), ncol = 1) + pointer_var_exp <- describe(var_exp) + } + if (!is.null(dim_obs) && (length(unlist(dim_obs)) == length(dim_obs)) && + !any(is.na(unlist(dim_obs))) && !any(unlist(dim_obs) == 0)) { + var_obs <- big.matrix(nrow = prod(unlist(dim_obs)), ncol = 1) + pointer_var_obs <- describe(var_obs) + } + if (is.null(nprocs)) { + nprocs <- detectCores() + } + # We calculate the % of total progress that each work piece represents so + # that progress bar can be updated properly + exp_work_piece_percent <- prod(dim_exp) / (prod(dim_obs) + prod(dim_exp)) + obs_work_piece_percent <- prod(dim_obs) / (prod(dim_obs) + prod(dim_exp)) + # Add some important extra fields in the work pieces before sending + exp_work_pieces <- lapply(exp_work_pieces, function (x) c(x, list(dataset_type = 'exp', dims = dim_exp, out_pointer = pointer_var_exp)))###, progress_amount = exp_work_piece_progress))) + obs_work_pieces <- lapply(obs_work_pieces, function (x) c(x, list(dataset_type = 'obs', dims = dim_obs, out_pointer = pointer_var_obs)))###, progress_amount = obs_work_piece_progress))) + work_pieces <- c(exp_work_pieces, obs_work_pieces) + # Calculate the progress %s that will be displayed and assign them to the + # appropriate work pieces + if (length(work_pieces)/nprocs >= 2 && !silent) { + if (length(work_pieces)/nprocs < 10) { + amount <- 100/ceiling(length(work_pieces)/nprocs) + reps <- ceiling(length(work_pieces)/nprocs) + } else { + amount <- 10 + reps <- 10 + } + progress_steps <- rep(amount, reps) + if (length(exp_work_pieces) == 0) { + selected_exp_pieces <- c() + } else if (length(exp_work_pieces) < floor(reps*exp_work_piece_percent) + 1) { + selected_exp_pieces <- length(exp_work_pieces) + progress_steps <- c(sum(head(progress_steps, + floor(reps*exp_work_piece_percent))), + tail(progress_steps, + ceiling(reps*obs_work_piece_percent))) + } else { + selected_exp_pieces <- round(seq(1, length(exp_work_pieces), + length.out = floor(reps*exp_work_piece_percent) + 1))[-1] + } + if (length(obs_work_pieces) == 0) { + selected_obs_pieces <- c() + } else if (length(obs_work_pieces) < 
ceiling(reps*obs_work_piece_percent) + 1) { + selected_obs_pieces <- length(obs_work_pieces) + progress_steps <- c(head(progress_steps, + floor(reps*exp_work_piece_percent)), + sum(tail(progress_steps, + ceiling(reps*obs_work_piece_percent)))) + } else { + selected_obs_pieces <- round(seq(1, length(obs_work_pieces), + length.out = ceiling(reps*obs_work_piece_percent) + 1))[-1] + } + selected_pieces <- c(selected_exp_pieces, selected_obs_pieces + length(exp_work_pieces)) + progress_steps <- paste0(' + ', round(progress_steps, 2), '%') + progress_message <- '* Progress: 0%' + } else { + progress_message <- '' + selected_pieces <- NULL + } + piece_counter <- 1 + step_counter <- 1 + work_pieces <- lapply(work_pieces, + function (x) { + wp <- c(x, list(is_2d_var = is_2d_var, grid = grid, remap = remap, + lon_limits = c(lonmin, lonmax), + lat_limits = c(latmin, latmax), + output = output, remapcells = remapcells, + single_dataset = single_dataset)) + if (piece_counter %in% selected_pieces) { + wp <- c(wp, list(progress_amount = progress_steps[step_counter])) + step_counter <<- step_counter + 1 + } + piece_counter <<- piece_counter + 1 + wp + }) + if (!silent) { + cat(paste("* Will now proceed to read and process ", length(work_pieces), " data files:\n", sep = '')) + if (length(work_pieces) < 30) { + lapply(work_pieces, function (x) cat(paste("* ", x[['filename']], '\n', sep = ''))) + } else { + cat(paste("* The list of files is long. You can check it after Load() finishes in the output '$source_files'.\n")) + } + if (length(dim_obs) == 0) { + bytes_obs <- 0 + obs_dim_sizes <- '0' + } else { + bytes_obs <- prod(c(dim_obs, 8)) + obs_dim_sizes <- paste(na.omit(as.vector(dim_obs[c('dataset', 'member', 'sdate', 'time', 'lat', 'lon')])), collapse = ' x ') + } + if (length(dim_exp) == 0) { + bytes_exp <- 0 + exp_dim_sizes <- '0' + } else { + bytes_exp <- prod(c(dim_exp, 8)) + exp_dim_sizes <- paste(na.omit(as.vector(dim_exp[c('dataset', 'member', 'sdate', 'time', 'lat', 'lon')])), collapse = ' x ') + } + cat(paste("* Total size of requested data: ", bytes_obs + bytes_exp, "bytes.\n")) + cat(paste("* - Experimental data: (", exp_dim_sizes, ") x 8 bytes =", bytes_exp, "bytes.\n")) + cat(paste("* - Observational data: (", obs_dim_sizes, ") x 8 bytes =", bytes_obs, "bytes.\n")) + cat(paste("* If size of requested data is close to or above the free shared RAM memory, R will crash.\n")) + } + # Build the cluster of processes that will do the work and dispatch work pieces. + # The function .LoadDataFile is applied to each work package. This function will + # open the data file, regrid if needed, trim (select time steps, longitudes, + # latitudes, members), apply the mask, compute and apply the weights if needed, + # disable extreme values and store in the shared memory matrices. 
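+    # (Sketch of the dispatch strategy below: with a single process the
+    #  pieces are read sequentially with lapply(); otherwise they are
+    #  distributed over a socket cluster with clusterApplyLB(), which hands
+    #  a new work piece to a worker as soon as it finishes its previous
+    #  one, a reasonable choice here since pieces can have uneven cost,
+    #  e.g. when some files need regridding and others do not.)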
+ if (nprocs == 1) { + found_files <- lapply(work_pieces, .LoadDataFile, silent = silent) + } else { + cluster <- makeCluster(nprocs, outfile = "") + # Open connections to keep track of progress + ###range_progress_ports <- c(49000, 49999) + ###progress_ports <- as.list(sample(range_progress_ports[2] - range_progress_ports[1], nprocs) + range_progress_ports[1]) + + # Open from master side + ###connection_set_up_job <- mcparallel({ + ### progress_connections <- vector('list', length(progress_ports)) + ### for (connection in 1:length(progress_ports)) { + ### attempts <- 0 + ### max_attempts <- 3 + ### while (is.null(progress_connections[[connection]]) && attempts < max_attempts) { + ### Sys.sleep(2) + ### suppressWarnings({ + ### progress_connections[[connection]] <- try({ + ### socketConnection(port = progress_ports[[connection]], open = 'w+b') + ### }, silent = TRUE) + ### }) + ### if (!('sockconn' %in% class(progress_connections[[connection]]))) { + ### progress_connections[[connection]] <- NULL + ### } + ### attempts <- attempts + 1 + ### } + ### } + + # And start polling the sockets and update progress bar + ### if (!any( lapply is.null!!! is.null(progress_connections))) { + ### progress <- 0.0 + ### pb <- txtProgressBar(0, 1, style = 3) + ### stop_polling <- FALSE + ### attempts <- 0 + ### max_attempts <- 3 + ### while (progress < 0.999 && !stop_polling) { + ### Sys.sleep(3) + ### progress_obtained <- lapply(progress_connections, function(x) as.numeric(readBin(x, 'double'))) + ### total_progress_obtained <- sum(unlist(progress_obtained)) + ### if (total_progress_obtained > 0) { + ### progress <- progress + total_progress_obtained + ### setTxtProgressBar(pb, progress) + ### attempts <- 0 + ### } else { + ### attempts <- attempts + 1 + ### if (attempts >= max_attempts) { + ### stop_polling <- TRUE + ### } + ### } + ### } + ### } + ###}) + + # Open from the workers side + ###open_connections <- clusterApply(cluster, progress_ports, + ### function (x) { + ### progress_connection <<- NULL + ### progress_connection <<- try({ + ### socketConnection(server = TRUE, port = x, open = 'w+b') + ### }) + ### if ('sockconn' %in% class(progress_connection)) { + ### TRUE + ### } else { + ### progress_connection <<- NULL + ### FALSE + ### } + ### }) + + ###if (!all(unlist(open_connections))) { + ### if (!silent) { + ### cat(paste("! Warning: failed to open connections in ports", process_track_ports[1], "to", process_track_ports[2], "to keep track of progress. Progress bar will not be displayed\n")) + ### } + ###} + + if (!silent) { + cat(paste("* Loading... This may take several minutes...\n", sep = '')) + cat(progress_message) + } + # Send the heavy work to the workers + work_errors <- try({ + found_files <- clusterApplyLB(cluster, work_pieces, .LoadDataFile, silent = silent) + }) + stopCluster(cluster) + } + if (!silent) { + if (progress_message != '') { + cat("\n") + } + if (any(unlist(lapply(found_files, is.null)))) { + if (sum(unlist(lapply(found_files, is.null))) < 30) { + cat("! WARNING: The following files were not found in the file system. Filling with NA values instead.\n") + lapply(work_pieces[which(unlist(lapply(found_files, is.null)))], function (x) cat(paste("* ", x[['filename']], '\n', sep = ''))) + } else { + cat("! WARNING: Some files were not found in the file system. The list is long. You can check it in the output '$not_found_files'. 
Filling with NA values instead.\n") + } + } + } + source_files <- unlist(found_files[which(!unlist(lapply(found_files, is.null)))]) + not_found_files <- unlist(lapply(work_pieces[which(unlist(lapply(found_files, is.null)))], '[[', 'filename')) + + } else { + error_message <- "Error: No found files for any dataset. Check carefully the file patterns and correct either the pattern or the provided parameters:\n" + if (!is.null(exp)) { + lapply(exp, function (x) error_message <<- paste0(error_message, paste0(x[['path']], '\n'))) + } + if (!is.null(obs)) { + lapply(obs, function (x) error_message <<- paste0(error_message, paste0(x[['path']], '\n'))) + } + stop(error_message) + } + + }) + + if (class(errors) == 'try-error') { + invisible(list(load_parameters = load_parameters)) + } else { + variable <- list() + variable[['varName']] <- var + variable[['level']] <- NULL + attr(variable, 'is_standard') <- FALSE + attr(variable, 'units') <- units + attr(variable, 'longname') <- var_long_name + attr(variable, 'daily_agg_cellfun') <- 'none' + attr(variable, 'monthly_agg_cellfun') <- 'none' + attr(variable, 'verification_time') <- 'none' + + if (is.null(var_exp)) { + mod_data <- NULL + } else { + dim_reorder <- length(dim_exp):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_exp + dim_exp <- dim_exp[dim_reorder] + mod_data <- aperm(array(bigmemory::as.matrix(var_exp), dim = old_dims), dim_reorder) + attr(mod_data, 'dimensions') <- names(dim_exp) + } + + if (is.null(var_obs)) { + obs_data <- NULL + } else { + dim_reorder <- length(dim_obs):1 + dim_reorder[2:3] <- dim_reorder[3:2] + old_dims <- dim_obs + dim_obs <- dim_obs[dim_reorder] + obs_data <- aperm(array(bigmemory::as.matrix(var_obs), dim = old_dims), dim_reorder) + attr(obs_data, 'dimensions') <- names(dim_obs) + } + + if (is.null(latitudes)) { + lat <- 0 + attr(lat, 'cdo_grid_name') <- 'none' + } else { + lat <- latitudes + attr(lat, 'cdo_grid_name') <- if (is.null(grid)) 'none' else grid + } + attr(lat, 'projection') <- 'none' + + if (is.null(longitudes)) { + lon <- 0 + attr(lon, 'cdo_grid_name') <- 'none' + } else { + lon <- longitudes + attr(lon, 'cdo_grid_name') <- if (is.null(grid)) 'none' else grid + } + attr(lon, 'projection') <- 'none' + + dates <- list() + dates[['start']] <- NULL + dates[['end']] <- NULL + + models <- NULL + if (length(exp) > 0 && !is.null(dim_exp)) { + models <- list() + for (jmod in 1:length(exp)) { + models[[exp[[jmod]][['name']]]] <- list( + members = paste0('Member_', 1:nmember[jmod]), + source = if ((nchar(exp[[jmod]][['path']]) - + nchar(gsub("/", "", exp[[jmod]][['path']])) > 2) && + (length(sdates) > 1 && !is_file_per_member_exp[jmod])) { + parts <- strsplit(exp[[jmod]][['path']], '/')[[1]] + paste(parts[-length(parts)], sep = '', collapse = '/') + } else { + exp[[jmod]][['path']] + }) + } + } + + observations <- NULL + if (length(obs) > 0 && !is.null(dim_obs)) { + observations <- list() + for (jobs in 1:length(obs)) { + observations[[obs[[jobs]][['name']]]] <- list( + members = paste0('Member_', 1:nmemberobs[jobs]), + source = if ((nchar(obs[[jobs]][['path']]) - + nchar(gsub("/", "", obs[[jobs]][['path']])) > 2) && + !is_file_per_dataset_obs[jobs]) { + parts <- strsplit(obs[[jobs]][['path']], '/')[[1]] + paste(parts[-length(parts)], sep = '', collapse = '/') + } else { + obs[[jobs]][['path']] + }) + } + } + + # Before ending, the data is arranged in the common format, with the following + # dimension order: + # nmod/nobs, members, startdates, leadtimes, latitudes, longitudes + invisible(list(mod = 
mod_data,
+                   obs = obs_data,
+                   lon = lon,
+                   lat = lat,
+                   Variable = variable,
+                   Datasets = list(exp = models, obs = observations),
+                   Dates = dates,
+                   InitializationDates = lapply(sdates,
+                     function (x) {
+                       sink('/dev/null')
+                       date <- print(as.POSIXct(as.Date(x, format = '%Y%m%d')))
+                       sink()
+                       date
+                     }),
+                   when = Sys.time(),
+                   source_files = source_files,
+                   not_found_files = not_found_files,
+                   load_parameters = load_parameters))
+  }
+
+
+
+}
diff --git a/old/SkillScores_Subseasonal.R b/old/SkillScores_Subseasonal.R
new file mode 100644
index 0000000000000000000000000000000000000000..fc0a3e255d2c4f32a5c83627a1b7dac2670ebc2d
--- /dev/null
+++ b/old/SkillScores_Subseasonal.R
@@ -0,0 +1,698 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: to compute the ACC, RPSS, CRPSS (+ bootstrapping) and the Reliability diagrams for a subseasonal forecast system
+# I/O: input monthly hindcasts formatted as in /esnas, output: .png with the desired skill score
+# Assumptions: it relies on Load() to import data
+# Branch: skill_assessment
+
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_Subseasonal.R
+#
+# e.g., to run only chunk number 3, write:
+#
+# Rscript SkillScores_Subseasonal.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
+
+##########################################################################################
+#                           Load functions and libraries                                 #
+##########################################################################################
+
+# load libraries and functions:
+library(s2dverification)   # for Load() function
+library(SpecsVerification) # for skill scores
+library(easyVerification)  # for veriApply() function
+library(abind)             # for merging arrays
+
+##########################################################################################
+#                              MareNostrum settings                                      #
+##########################################################################################
+
+args <- commandArgs(TRUE) # put into variable 'args' the list of all the optional arguments with which the script was run from the terminal
+
+if(length(args) == 0) print("Running the script in a sequential way")
+if(length(args) > 1) stop("No more than one argument is allowed")
+
+mare <- ifelse(length(args) == 1, TRUE, FALSE) # set variable 'mare' to TRUE if we are running the script on MareNostrum, FALSE otherwise
+
+if(mare) chunk <- as.integer(args[1]) # number of the chunk to run in this script (if mare == TRUE)
+
+##########################################################################################
+#                                 User's settings                                        #
+##########################################################################################
+
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/projects/bsc32/bsc32842/results")
+
+cfs.name <- 'ECMWF'   # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, e.g. 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), e.g. '10m Wind Speed' or 'SLP'
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1              # starting forecast month (1: January, 2: February, etc.)
+day <- 2              # starting forecast day
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1 #1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects 'la' and 'lo'):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecast categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows to use function load() below: 
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#           obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#           nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+load(paste0(workdir,'/coordinates.RData'))
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+if(mare) n.lat <- 1 # each MareNostrum job works on one latitude row (the original 'n.lat == 1' was a comparison with no effect; an assignment appears to be intended)
+
+#########################################################################################
+#       Calculate and save up to all the chosen Skill Scores for a selected period      #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies by running once the preformatting part at the end of this script.
+#
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e. "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Chunk n. ')
+
+  # If we run the script from the terminal with an argument, it computes only the chunk specified by that argument and saves the results for that chunk;
+  # if not, it loops over all chunks serially and saves the results at the end. 
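+  # (Usage sketch, assuming the file keeps its current name: submitting
+  #  'Rscript SkillScores_Subseasonal.R 3' sets mare to TRUE and chunk to 3,
+  #  so only the third latitude chunk is computed and saved to its own
+  #  '..._chunk_3.RData' file below; several chunks can thus run as
+  #  independent jobs, while running the script without arguments computes
+  #  all chunks one after the other.)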
+ if(!mare) {my.chunks <- 1:n.lat} else {my.chunks <- chunk} + + for(c in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation: + #s=1 # for the debug + cat(paste0(c,'/', n.lat,' ')) + anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, c, n.lon)) + anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, c, n.lon)) + #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + # not working on MareNostrum still: + # load 1 file ONLY ONCE to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed: + coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1) + # lat <- coord$lat + # lon <- coord$lon + # save() + # load() + #system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = lat[5])) + + for(startdate in my.startdates){ + pos.startdate<-which(startdate == my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,c,] + + # Load hindcast data: + if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss" || any(my.score.name=="RelDiagr"))){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,c,] + rm(anom.hindcast, anom.rean) + gc() + } + + if(any(my.score.name=="EnsCorr")){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data + anom.hindcast.mean <- drop(anom.hindcast.mean) + anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,c,] + rm(anom.hindcast.mean) + gc() + } + + } + + if(any(my.score.name=="FairRpss")){ + if(!boot){ + #anom.hindcast.chunk.double <- abind(list(anom.hindcast.chunk,anom.hindcast.chunk),along=4) + #anom.rean.chunk.double <- abind(list(anom.rean.chunk,anom.rean.chunk),along=3) + + + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- 
obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to loose the dimension(s) with only 1 element + + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpss[,c,] <- my.FairRpss.chunk + + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + if(mare == TRUE) { + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + } + + + } # next c (chunk) + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + if(mare == FALSE) { + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + 
+ my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+ # Load reanalysis data of next startdate:
+ load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+ anom.rean <- drop(anom.rean)
+ anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+ # Load hindcast data of next startdate:
+ load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+ anom.hindcast <- drop(anom.hindcast)
+ anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+ rm(anom.rean,anom.hindcast)
+ }
+
+
+ anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+ gc()
+
+ cat('Computing the Reliability Diagram. Please wait... ')
+ ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob<-my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+ obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob<-my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+ ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+ obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes 10s
+
+ if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, use the whole world
+ if(is.null(int.lon)) int.lon <- 1:n.lon
+
+ int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+ my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+ int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+ my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+ int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+ my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+ cat('done\n')
+ #print(my.RelDiagr) # for debugging
+
+ rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+ gc()
+
+ } # next lead
+
+ } # close if on RelDiagr
+
+ # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved)
+ if(!boot){
+ if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+ if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+ if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+ if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+ } else { # bootstrapping output:
+ if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+ if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+ }
+
+ # If it has finished computing the last chunk, it can load all results in one file, deleting the intermediate output files:
+ if(job.chunk == tot.chunks){
+ # (not implemented yet)
+ }
+
+
+ } # close if on mare
+
+} # next m (month)
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(mare == FALSE){
+
+###########################################################################################
+# Visualize and save the score maps for one score and period #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+ #month=2 # for the debug
+ startdate.name.map<-my.month[month] # i.e:"January"
+
+ for(my.score.name.map in my.score.name){
+ #my.score.name.map='EnsCorr' # for the debug
+
+ if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+ if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+ if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+ if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+ # old green-red palette:
+ # brk.rpss<-c(-1,seq(0,1,by=0.05))
+ # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+ # brk.rps<-c(seq(0,1,by=0.1),10)
+ # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+ # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+ # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+ brk.rpss<-c(seq(-1,1,by=0.1))
+ col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+ my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+ my.col<-col.rpss
+
+ for(lead in 1:n.leadtimes){
+
+ #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+ #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+ if(my.score.name.map!="RelDiagr"){
+
+ #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+ # paper="special",width=12,height=7,horizontal=F)
+
+ jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+ layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+ par(oma=c(1,1,4,1))
+
+ # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees:
+ PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+ my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+ title(my.title,line=0.5,outer=T)
+
+ ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+ if(my.score.name.map=="EnsCorr"){ # add a small grey point if the corr. is significant:
+
+ # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+ my.PValue.rev <- my.PValue
+ my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] < 0.05 # TRUE where the correlation is significant at the 5% level
+ for (x in 1:n.lon) {
+ for (y in 1:n.lat) {
+ if (my.PValue.rev[lead,y, x] == TRUE) {
+ text(x = lo[x], y = la[y], ".", cex = .2)
+ }
+ }
+ }
+ }
+ dev.off()
+
+ } # close if on !RelDiagr
+
+ if(my.score.name.map=="RelDiagr") {
+
+ jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+ my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+ plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+ lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+ #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+ lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+ legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+ dev.off()
+ }
+
+ } # next lead
+
+ } # next score
+
+} # next month
+
+
+###############################################################################################
+# Composite map of the four skill scores #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+ startdate.name.map<-my.month[month] # i.e:"January"
+
+ #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+ jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+ layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+ #layout.show(16)
+
+ for(score in 1:4){
+ for(lead in 1:n.leadtimes){
+
+ img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) # readJPEG() requires the 'jpeg' package
+ par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+ plot.new()
+ rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+ } # next lead
+
+ } # next score
+
+ dev.off()
+
+} # next month
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+###############################################################################################
+# Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate #
+###############################################################################################
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+ print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+ data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
+ obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)),
+ nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='bilinear' )
+
+ # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+ # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime):
+ clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+ clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies
+
+ anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+ rm(data.hindcast,clim.hindcast)
+ gc()
+
+ # average the anomalies over the 4 hindcast members [1,20,4,320,640]
+ anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+ my.grid<-paste0('r',n.lon,'x',n.lat)
+ data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+ obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+ nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='bilinear')
+
+ # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+ # Mean of the 20 years (for each point and leadtime)
+ clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+ clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+ anom.rean <- data.rean$mod - clim.rean
+
+ anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+ #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+ save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+ save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+ save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+ save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+ rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+ gc()
+
+}
+
+###############################################################################################
+# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate #
+###############################################################################################
+#
+# TODO: finish!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
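+
+# A minimal, self-contained sketch of the climatology/anomaly step used above, on a toy
+# array with dimensions [member, year, leadtime] (the toy sizes are assumptions, not the
+# real hindcast dimensions):
+toy <- array(rnorm(4*20*5), c(4, 20, 5))                      # 4 members, 20 years, 5 leadtimes
+toy.clim <- apply(toy, 3, mean)                               # one climatology per leadtime
+toy.clim.rep <- InsertDim(InsertDim(toy.clim, 1, 20), 1, 4)   # replicate it back to [member, year, leadtime]
+toy.anom <- toy - toy.clim.rep                                # anomalies, same dimensions as 'toy'
+round(apply(toy.anom, 3, mean), 10)                           # ~0 for every leadtime, as expected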
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + diff --git a/old/SkillScores_map_v1.R b/old/SkillScores_map_v1.R new file mode 100644 index 0000000000000000000000000000000000000000..424659e2aec49b8ae107f568cfc1f1fe15a45df7 --- /dev/null +++ b/old/SkillScores_map_v1.R @@ -0,0 +1,997 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and 
libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# DY European areas: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 and 360, and +# lat values MUST be from 90 to -90 degrees) +# Northern Europe: +area1 <- c(-15,45,45,75) +# Southwestern Europe: +area2 <- c(-15,20,35,45) +# Southeastern Europe: +area3 <- c(20,45,35,45) + +# Choose one region between those defined above: +area <- area1 + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +# choose one or more skills scores to visualize between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## + +for(month in my.months){ + #month=1 # for the debug + + for(my.score.name.map in my.score.name){ + #my.score.name.map='EnsCorr' # for the debug + + if(my.score.name.map=='FairRpss') { load(file=paste0(work.dir,'/',var.name,'/FairRpss_',my.month[month],'.RData')); my.score<-my.FairRpss.chunk } + if(my.score.name.map=='FairCrpss') { load(file=paste0(work.dir,'/',var.name,'/FairCrpss_',my.month[month],'.RData')); my.score<-my.FairCrpss.chunk } + if(my.score.name.map=="EnsCorr") { load(file=paste0(work.dir,'/',var.name,'/EnsCorr_',my.month[month],'.RData')); my.score<-my.EnsCorr.chunk } + if(my.score.name.map=="RelDiagr") { load(file=paste0(work.dir,'/',var.name,'/RelDiagr_',my.month[month],'.RData')); my.score<-my.RelDiagr } + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # 
col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+ # brk.rps<-c(seq(0,1,by=0.1),10)
+ # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+ # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+ # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+ workdir <- work.dir # the plotting paths below use 'workdir'; alias it to the 'work.dir' set in the user settings
+
+ brk.rpss<-c(seq(-1,1,by=0.1))
+ col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+ my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+ my.col<-col.rpss
+
+ for(lead in 1:n.leadtimes){
+
+ #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+ #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+ if(my.score.name.map!="RelDiagr"){
+
+ #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+ # paper="special",width=12,height=7,horizontal=F)
+
+ jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600) # jpeg(), not png(): these .jpg files are read back below with readJPEG()
+
+ layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+ par(oma=c(1,1,4,1))
+
+ #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+ #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+ #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+
+ #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F)
+ # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees:
+ PlotEquiMap(my.score[lead,,], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F)
+ my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.')
+ title(my.title,line=0.5,outer=T)
+
+ ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+ if(my.score.name.map=="EnsCorr"){ # add a small grey point if the corr. is significant:
+
+ # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+ my.PValue.rev <- my.PValue.chunk < 0.05
+ #my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+
+ #for (x in 1:n.lon) {
+ # for (y in 1:n.lat) {
+ # if (my.PValue.rev[lead,y, x] == TRUE) {
+ # text(x = lo[x], y = la[y], ".", cex = .2)
+ # }
+ # }
+ #}
+ }
+ #dev.off()
+
+ if(my.score.name.map=="FairRpss"){ # prepare the significance mask for the hatching below:
+
+ # arrange lat/lon in my.PValue to start from +90 degr. (lat) and from 0 degr. (lon):
+ my.PValue.rev <- my.FairRpss.pvalue > 0.05
+ #my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+ #my.PValue.rev <- matrix(TRUE,n.lon,n.lat)
+
+ #for (x in 1:n.lon) {
+ # for (y in 1:n.lat) {
+ # if (my.PValue.rev[lead,y, x] == TRUE) {
+ # text(x = lo[x], y = la[y], ".", cex = .2)
+ # }
+ # }
+ #}
+ }
+ #dev.off()
+
+ if(my.score.name.map=="FairCrpss"){ # prepare the significance mask for the hatching below:
+
+ # arrange lat/lon in my.PValue to start from +90 degr. (lat) and from 0 degr. (lon):
+ my.PValue.rev <- my.FairCrpss.pvalue > 0.05
+ #my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+
+ #for (x in 1:n.lon) {
+ # for (y in 1:n.lat) {
+ # if (my.PValue.rev[lead,y, x] == TRUE) {
+ # text(x = lo[x], y = la[y], ".", cex = .2)
+ # }
+ # }
+ #}
+ }
+
+ # Draw the significance diagonal lines:
+ hatching(lats, lons, my.PValue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+
+ dev.off()
+
+ } # close if on !RelDiagr
+
+ if(my.score.name.map=="RelDiagr") {
+
+ jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+ my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.')
+
+ plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+ lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+ lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+ lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+ legend("bottomright", legend=c('Lower Tercile','Middle Tercile','Upper Tercile'), lty=c(1,1,1), lwd=c(2.5,2.5,2.5), col=c('blue','darkgreen','red'))
+
+ dev.off()
+ }
+
+ } # next lead
+
+ } # next score
+
+} # next month
+
+#############################################################################
+# Visualize summary tables for a region #
+#############################################################################
+array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4))
+
+ load(file=paste0(work.dir,'/',var.name,'/EnsCorr_',my.period[1],'.RData')) # for my.EnsCorr.chunk
+
+
+for(p in 1:12){
+
+ load(file=paste0(work.dir,'/',var.name,'/EnsCorr_',my.period[p],'.RData')) # for my.EnsCorr.chunk
+ load(file=paste0(work.dir,'/',var.name,'/FairRpss_',my.period[p],'.RData')) # for my.FairRpss.chunk
+ load(file=paste0(work.dir,'/',var.name,'/FairCrpss_',my.period[p],'.RData')) # for my.FairCrpss.chunk
+
+ area.lon <- c(1:which(lons >= lon.max)[1], (which(lons >= lon.min)[1]:length(lons))) # restrict area to continental Europe only
+ area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to continental Europe only
+
+ for(l in 0:3){
+ #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+ array.EnsCorr[p, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T)
+ array.FairRpss[p, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T)
+ array.FairCrpss[p, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T)
+ }
+}
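+
+# Aside: the repeated "array.colors[array.X < my.seq[k]] <- my.cols[k-1]" cascades below
+# can be collapsed into a single findInterval() call; a minimal equivalent sketch, kept
+# commented out because my.cols and my.seq are only defined further down (the pmin/pmax
+# clamping is an assumption to also catch scores outside [-1,1]; NAs keep the default
+# colour in the original cascades but become NA here):
+#bin <- findInterval(array.EnsCorr, my.seq, rightmost.closed=TRUE)
+#array.colors <- array(my.cols[pmin(pmax(bin, 1), length(my.cols))], dim(array.EnsCorr))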
+
+
+# Impact summary:
+
+#png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400)
+png(file=paste0(workdir,'/Summary_',var.name,'.png'), width=1000, height=600) # output filename is an assumption: the original one pasted in loop variables (my.score.name.map, startdate.name.map, lead) that are stale or undefined at this point
+
+my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+# create an array similar to array.pers but with colors instead of frequencies:
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("Subseasonal sfcWind 1994-2013", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("EnsCorr", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,3), las=1, cex.axis=1, labels=3:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+
+array.colors <- array(my.cols[8],c(12,4))
+array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7]
+array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6]
+array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5]
+array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4]
+array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3]
+array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2]
+array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1]
+
+for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}}
+
+#for(i in 1:12){ for(j in 0:3){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.EnsCorr.colors[i2,1+3-j])}}
+
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("FairRPSS", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,3), las=1, cex.axis=1, labels=3:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+
+array.colors <- array(my.cols[8],c(12,4))
+array.colors[array.FairRpss < my.seq[8]] <- my.cols[7]
+array.colors[array.FairRpss < my.seq[7]] <- my.cols[6]
+array.colors[array.FairRpss < my.seq[6]] <- my.cols[5]
+array.colors[array.FairRpss < my.seq[5]] <- my.cols[4]
+array.colors[array.FairRpss < my.seq[4]] <- my.cols[3]
+array.colors[array.FairRpss < my.seq[3]] <- my.cols[2]
+array.colors[array.FairRpss < my.seq[2]] <- my.cols[1]
+
+for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}}
+
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) # third panel, placed to the right of the FairRPSS one
+mtext("FairCRPSS", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,3), las=1, cex.axis=1, labels=3:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}} + +dev.off() + + + + +############################################################################################### +# Composite map of the four skill scores # +############################################################################################### + +# run it only when you have all the 4 skill scores for each month: + +my.months=1:12 # choose the month(s) to plot and save + +for(month in my.months){ + + startdate.name.map<-my.month[month] # i.e:"January" + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100) + + layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1)) + #layout.show(16) + + for(score in 1:4){ + for(lead in 1:n.leadtimes){ + + img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) + par(mar = rep(0, 4), xaxs='i', yaxs='i' ) + plot.new() + rasterImage(img, 0, 0, 1, 1, interpolate=FALSE) + + } # next lead + + } # next score + + dev.off() + +} # next month + + + + + + + + + + + + + + + + + + + + + +# Dani Map script for the small figure: +#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. 
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
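+# Quick self-check of what convert2prob() returns (the toy numbers are an assumption):
+# for an [N x M] ensemble it gives an [N x 3] matrix of member counts per tercile, so
+# every row sums to M; for a vector of observations, 0/1 occurrences instead.
+toy.counts <- convert2prob(array(rnorm(10*4), c(10,4)), prob=c(1/3,2/3))
+rowSums(toy.counts) # each of the 10 rows distributes its 4 members over the 3 terciles
+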
+obs<-convert2prob(anom.rean,prob<-my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob<-my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample with no replacement of the hindcast
+ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # function used by convert2prob when ref is not specified; it is ~ equal to the above function ClimEns, it only has one more column
+ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob.table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the probabilities of each year do not sum to 100%!
+y<-count2prob(obs) # we should add the option type=4; if not, the probabilities of each year do not sum to 100%!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # it comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without fcst.ref: veriApply builds its own climatological reference
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # it comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+# Other analysis #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
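+
+# A tiny self-contained check of the aperm() trick above (toy dimensions, base R only):
+tmp <- array(1:24, c(2,3,4))                    # dimensions [a,b,c]
+tmp2 <- array(aperm(tmp, c(3,1,2)), c(4, 2*3))  # reorder to [c,a,b], then merge a and b into one dimension
+dim(tmp2)                                       # 4 x 6
+all(tmp2[,1] == tmp[1,1,])                      # TRUE: the first merged column is the a=1, b=1 series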
+
+# when you have for each leadtime all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+ main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+ xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs.data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+ data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+ obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+ leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted')
+
+ my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate Reanalysis climatology loading only 1 file every 4 (it's quick, but it needs to have all files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate every 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years
+ data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+ obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+ nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+ clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+ # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+ if(startdate <= 49){
+ clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week)
+ clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4
+ clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+ clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e
+ clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f
+ clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g
+ } # 5 e f g h
+
+ if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+ clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+ clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+ clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+ clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+ clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+ clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+ }
+
+}
+
+
+ # You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+ # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+ # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+ C_cor<-get("C_cor", asNamespace("stats"))
+ test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+ my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+# Raster Animations #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+ j <- pngobj
+ # If there is an alpha component remove it...
+ # might be better to just adjust the mat matrix
+ if(dim(j)[3] > 3){
+ j <- j[,,1:3]
+ }
+ k <- apply(j, c(1,2), c)
+ mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+ grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+ clust <- kmeans(mat, nclust, iter.max = 20)
+ new <- clust$centers[clust$cluster,]
+
+ for(i in 1:3){
+ tmp <- matrix(new[,i], nrow = dim(j)[1])
+ j[,,i] <- tmp
+ }
+
+ plot.new()
+ rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+ j <- readPNG(file)
+ for(i in 1:n){
+ clustPNG(j, i)
+ mtext(paste("Number of clusters:", i))
+ }
+
+ j <- readPNG(file)
+ plot.new()
+ rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+ mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+# ProgressBar #
+#######################################################################################
+
+for(i in 1:10) {
+ Sys.sleep(0.2)
+ # Dirk says using cat() like this is naughty ;-)
+ #cat(i,"\r")
+ # So you can use message() like this, thanks to Sharpie's
+ # comment to use appendLF=FALSE.
+ message(i,"\r",appendLF=FALSE)
+ flush.console()
+}
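+
+# Yet another option, not used elsewhere in this script: a real progress bar from
+# base utils. A minimal sketch:
+pb <- txtProgressBar(min = 0, max = 10, style = 3)
+for(i in 1:10) {
+ Sys.sleep(0.2)
+ setTxtProgressBar(pb, i)
+}
+close(pb)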
+
+
+for(i in 1:10) {
+ Sys.sleep(0.2)
+ # Dirk says using cat() like this is naughty ;-)
+ cat(i,"\r")
+}
+
+
+
+#######################################################################################
+# Load large datasets with ff #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # 'list' takes the object names as strings
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper that restores the hindcast from disk before scoring it (the wrapper
+# name and arguments are assumptions; veriApplyBig() itself is defined in veriApplyBig.R):
+veriApplyBig.from.disk <- function(my.scratch, anom.rean){
+ ffload(file=my.scratch) # restores the ff object 'anom.hind'
+ open(anom.hind)
+ my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+ close(anom.hind)
+ my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the line below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
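+
+# A minimal, self-contained ff round trip on toy data ('tmpfile' and the toy sizes are
+# assumptions, not paths used by this project):
+tmpfile <- file.path(tempdir(), "ffdemo")
+demo.ff <- ff(rnorm(2*3*4), dim=c(2,3,4), vmode="double", filename=paste0(tmpfile, ".bin"))
+ffsave(demo.ff, file=tmpfile)                          # writes <tmpfile>.ffData and <tmpfile>.RData
+close(demo.ff); rm(demo.ff)
+ffload(file=tmpfile, list="demo.ff", overwrite=TRUE)   # note: 'list' takes object *names* as strings
+open(demo.ff)
+dim(demo.ff)
+close(demo.ff); delete(demo.ff)                        # remove the backing file from disk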
+
+
+#########################################################################################
+# Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+ #month=1 # for the debug
+ my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+ startdate.name=my.month[month] # name given to the period of the selected startdates, i.e:"January"
+ n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+ print(paste0("Computing Skill Scores for ",startdate.name))
+
+ if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+ if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big includes also the reanalysis data as additional last member
+
+ if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+ if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+ for(startdate in my.startdates){
+
+ pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+ my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+ load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+ if(!boot) {
+ anom.rean<-drop(anom.rean)
+ anom.rean.big[my.time.interv,,,] <- anom.rean
+ }
+
+ if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+ load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+ anom.hindcast<-drop(anom.hindcast)
+
+ if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+ if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+ rm(anom.hindcast, anom.rean)
+ }
+ gc()
+ }
+
+ if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+ if(!boot) {
+ if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+ prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+ if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+ } else {
+ # bootstrapping:
+ my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+ if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+ for(b in 1:n.boot){
+ print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
+
+#########################################################################################
+# Calculate and save the FairRpss for a chosen period using ff and Load()              #
+#########################################################################################
+
+anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+for(startdate in my.startdates){
+  pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+  my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  #anom.rean<-drop(anom.rean)
+  #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+  my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+  anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                    obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                    output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+  #anom.hindcast<-drop(anom.hindcast)
+
+  # NOTE: this Load()-based version is an unfinished sketch: anom.rean.big is never filled and
+  # anom.hindcast only exists if the commented load() above is run; the commented lines are the working path.
+  anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+  rm(anom.hindcast)
+}
+
+my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+# Alternative way to speed up Load()                                                   #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to test for being WT-driven
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), sdates = paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+# Forecast system list:
+S4 <- list(path = ...)
+CFSv2 <- list(path = ...)
+
+# Reanalysis list:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = ...)
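+
+# The $VAR_NAME$/$STORE_FREQ$ placeholders in these path templates can also be expanded with a
+# small helper instead of the chained gsub() calls used below; a minimal sketch (the function
+# name expand.path and its arguments are illustrative, not part of s2dverification):
+expand.path <- function(template, var.name, store.freq){
+  out <- gsub("\\$VAR_NAME\\$", var.name, template)
+  gsub("\\$STORE_FREQ\\$", store.freq, out)
+}
+# e.g. expand.path(ERAint$path, 'sfcWind', 'daily') still contains $YEAR$/$MONTH$,
+# which Load() itself fills in for each startdate.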
+
+
+
+
+# Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+exp.source <- NULL
+obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+# Select one variable and its frequency:
+var.name <- 'sfcWind'
+store.freq <- 'daily'
+
+# Extract only the directory path of the forecast and reanalysis:
+obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+obs.source4 <- strsplit(obs.source3,'/')
+obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+obs.source6 <- list.files(path = obs.source5)
+
+# example ncks calls to cut out a few longitudes (the first call only builds the directory name;
+# a file name still has to be appended to it):
+system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+y <- 1990
+var <- Load(var = var.name, exp = exp.source, obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+#} # close if on mare (commented out: the matching if() block is not present in this script)
+
+ diff --git a/old/SkillScores_map_v1.R~ b/old/SkillScores_map_v1.R~ new file mode 100644 index 0000000000000000000000000000000000000000..e36b5d62eda2541ad1260976d02176a6cb9c36e8 --- /dev/null +++ b/old/SkillScores_map_v1.R~ @@ -0,0 +1,847 @@
+###########################################################################################
+# Visualize and save the score maps for one score and period                              #
+###########################################################################################
+# run it only after computing and saving all the skill score data:
+
+rm(list=ls())
+
+##########################################################################################
+# Load functions and libraries                                                           #
+##########################################################################################
+
+library(SpecsVerification) # for skill scores
+library(s2dverification)   # for the Load() function
+library(abind)             # for merging arrays
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+
+##########################################################################################
+# User's settings                                                                        #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs
+# (NOTE: `mare` must already be defined, e.g. in Rfunctions.R, for this ifelse() to work):
+work.dir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE/Subestacional", "/gpfs/projects/bsc32/bsc32842/results")
+workdir <- work.dir # the rest of the script refers to `workdir`
+
+var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+#my.lat <- 55.3197120
+#my.lon <- 0.5625
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') # choose one or more skill scores to visualize among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+my.months=1:12 #9:12 # choose the month(s) to plot and save
+
+##########################################################################################
+
+for(month in my.months){
+  #month=1 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=='FairRpss')  { load(file=paste0(workdir,'/',var.name,'/FairRpss_',startdate.name.map,'.RData'));  my.score<-my.FairRpss.chunk }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss.chunk }
+    if(my.score.name.map=="EnsCorr")   { load(file=paste0(workdir,'/',var.name,'/EnsCorr_',startdate.name.map,'.RData'));   my.score<-my.EnsCorr.chunk }
+    if(my.score.name.map=="RelDiagr")  { load(file=paste0(workdir,'/',var.name,'/RelDiagr_',startdate.name.map,'.RData'));  my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1) # `col` (the base palette) must be defined beforehand, as in SkillScores_map_v2.R
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
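+
+    # How the palette above works: colorRampPalette() returns a *function* that interpolates
+    # any number of colors from the base vector, and PlotEquiMap() expects one color per
+    # interval, i.e. length(cols) == length(brks)-1. Quick illustration (toy palette):
+    #   pal <- colorRampPalette(c("blue","white","red"))
+    #   pal(5)  # -> 5 hex colors evenly interpolated from blue to red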
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        png(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        # add a small grey point where the score is significant; each score file stores its
+        # p-values as a second variable. Lat/lon are rearranged to start from +90 deg (lat)
+        # and from 0 deg (lon), as for the map itself:
+        if(my.score.name.map=="EnsCorr")   my.PValue.rev <- my.PValue
+        if(my.score.name.map=="FairRpss")  my.PValue.rev <- my.FairRpss.pvalue
+        if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue > 0.05
+
+        my.PValue.rev[lead,,] <- my.PValue.rev[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+
+        for (x in 1:n.lon) {
+          for (y in 1:n.lat) {
+            if (my.PValue.rev[lead,y,x] == TRUE) {
+              text(x = lo[x], y = la[y], ".", cex = .2)
+            }
+          }
+        }
+
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Middle Tercile','Upper Tercile'), lty=c(1,1,1), lwd=c(2.5,2.5,2.5), col=c('blue','darkgreen','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+
+###############################################################################################
+# Composite map of the four skill scores                                                      #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+library(jpeg) # for readJPEG()
+# NOTE: the weekly maps above were written with png() even though their extension is .jpg;
+# readJPEG() expects real JPEG files.
+
+my.months=1:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+###############################################################################################
+# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate     #
+###############################################################################################
+#
+# TODO: finish!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
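+
+# A minimal sketch of the climatology/anomaly step on a toy array with the same layout as
+# data.hindcast$mod, i.e. [dataset, member, year, leadtime, lat, lon] (all sizes illustrative):
+toy  <- array(rnorm(1*2*3*4*5*6), c(1,2,3,4,5,6))
+clim <- apply(toy, c(1,4,5,6), mean)[1,,,]                # mean over members and years -> [leadtime,lat,lon]
+clim <- InsertDim(InsertDim(InsertDim(clim,1,3),1,2),1,1) # re-insert the year, member and dataset dims
+anom <- toy - clim                                        # anomalies with the same shape as toy
+range(apply(anom, c(1,4,5,6), mean))                      # ~0 by construction
+rm(toy, clim, anom)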
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
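+# What convert2prob() returns here (sketch, illustrative values): with K=3 categories defined
+# by the tercile thresholds in my.prob, each row corresponds to one case and holds the count
+# of ensemble members falling in each category (0/1 per category for a plain vector):
+#   convert2prob(c(-1.2, 0.1, 1.5), prob=c(1/3,2/3))  # -> 3x3 matrix of category indicators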
+obs<-convert2prob(anom.rean, prob=my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind, prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement from the hindcast
+ens.clim<-convert2prob(anom.hind.clim, prob=my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; ~ equal to the ClimEns function above, it only has one column more
+ens.clim.rean<-convert2prob(anom.rean.clim, prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the probabilities for each year do not sum to 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the probabilities for each year do not sum to 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using its internal climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from the N -> +oo value of Rps.clim (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without fcst.ref, for comparison
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its internal climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from the N -> +oo value of FairRps.clim (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+# Other analysis                                                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
+
+# when you have, for each leadtime, all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four daily time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+                       obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+                       leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+# (the corresponding my.points.exp.* means must first be computed in the same way from the hindcast data)
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the reanalysis climatology loading only 1 file in every 4 (it's quick, but it needs all the files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate in every 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate   leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week)      1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] #   1         a b c d  <- only startdates 1 and 5 are necessary to fill all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] #   2         b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] #   3         c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] #   4         d e f g
+  }                                                       #   5         e f g h
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+# Raster Animations                                                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component, remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+# ProgressBar                                                                         #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+ message(i,"\r",appendLF=FALSE) + flush.console() +} + + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + cat(i,"\r") + + +} + + + +####################################################################################### +# Load large datasets with ff # +####################################################################################### + +library(ff) +my.scratch<-"/scratch/Earth/ncortesi/myBigData" +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind,file=my.scratch) +close(anom.hind);rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind);rm(anom.hind) + + + +BigDataPath <- "/scratch/Earth/ncortesi/myBigData" +source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R') + +anom.hind.dim<-c(5,3,1,256,512) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +save.big(array=anom.hind, path=BigDataPath) + +veriApplyBig <- function(, obsmy.scratch){ + ffload(file=my.scratch) + open(anom.hind) + my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) + close(anom.hind) +} + + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) + +my.scratch<-"/scratch/Earth/ncortesi/myBigData" + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) +close(anom.hind); rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind); rm(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind") +str(anom.hind) +dim(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData") +str(anom.hind) +dim(anom.hind) + +ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind") +#ffinfo(file="/scratch/Earth/ncortesi/anomhind") +close(anom.hind) +#delete(anom.hind) +#rm(anom.hind) + +ffload(file="/scratch/Earth/ncortesi/anomhind") + +anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory + +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +fcst<-anom.hind +obs<-anom.rean + +close(anom.hind) +rm(anom.hind) + +#ffdrop(file="/scratch/Earth/ncortesi/anomhind") + +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE) +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE) + + +pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! 
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+# Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
+        yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+        if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled"))
+        if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon))
+
+        if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # the hindcasts must be reloaded if b>1 (they are removed below)
+
+        for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff this takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m)
+
+        if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands
+
+        # faster way that will be able to replace the above one when the ffapply output parameters will be improved:
+        #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only
+        #aa<-abind(a[1:10], along=4) # it doesn't fit into memory!
+
+        if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                 obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                 prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss
+
+        if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                   obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                   tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss
+
+        if(bigdata) delete(anom.hind.big.sampled)
+        rm(anom.hind.big.sampled) # delete the sampled hindcast
+
+      }
+
+      # calculate the 5th and 95th percentiles of the skill scores:
+      if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+      if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- apply(my.FairCrpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+
+    } # close if on boot
+
+  }
+
+  if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts
+  rm(anom.hind.big)
+  rm(anom.rean.big)
+  gc()
+
+  if(!boot){
+    if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  } else {
+    if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+  if(exists("my.FairRpss")) rm(my.FairRpss); if(exists("my.FairCrpss")) rm(my.FairCrpss)
+  if(exists("my.FairRpssBoot")) rm(my.FairRpssBoot); if(exists("my.FairCrpssBoot")) rm(my.FairCrpssBoot)
+  if(exists("my.FairRpssConf")) rm(my.FairRpssConf); if(exists("my.FairCrpssConf")) rm(my.FairCrpssConf)
+
+} # next m (month)
+
+
+#########################################################################################
+# Calculate and save the FairRpss for a chosen period using ff and Load()              #
+#########################################################################################
+
+anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+for(startdate in my.startdates){
+  pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+  my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  #anom.rean<-drop(anom.rean)
+  #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+  my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+  anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                    obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                    output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+  #anom.hindcast<-drop(anom.hindcast)
+
+  # NOTE: this Load()-based version is an unfinished sketch: anom.rean.big is never filled and
+  # anom.hindcast only exists if the commented load() above is run; the commented lines are the working path.
+  anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+  rm(anom.hindcast)
+}
+
+my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+# Alternative way to speed up Load()                                                   #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to test for being WT-driven
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), sdates = paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+# Forecast system list:
+S4 <- list(path = ...)
+CFSv2 <- list(path = ...)
+
+# Reanalysis list:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = ...)
+
+
+
+
+
+# Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+exp.source <- NULL
+obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+# Select one variable and its frequency:
+var.name <- 'sfcWind'
+store.freq <- 'daily'
+
+# Extract only the directory path of the forecast and reanalysis:
+obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+obs.source4 <- strsplit(obs.source3,'/')
+obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+obs.source6 <- list.files(path = obs.source5)
+
+# example ncks calls to cut out a few longitudes (the first call only builds the directory name;
+# a file name still has to be appended to it):
+system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+y <- 1990
+var <- Load(var = var.name, exp = exp.source, obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+#} # close if on mare (commented out: the matching if() block is not present in this script)
+
+ diff --git a/old/SkillScores_map_v2.R b/old/SkillScores_map_v2.R new file mode 100644 index 0000000000000000000000000000000000000000..a11b6f7c0fb007aa002d60d76a41b0cf25636141 --- /dev/null +++ b/old/SkillScores_map_v2.R @@ -0,0 +1,974 @@
+###########################################################################################
+# Visualize and save the score maps for one score and period                              #
+###########################################################################################
+# run it only after computing and saving all the skill score data:
+
+rm(list=ls())
+
+##########################################################################################
+# Load functions and libraries                                                           #
+##########################################################################################
+
+library(SpecsVerification) # for skill scores
+library(s2dverification)   # for the Load() function
+library(abind)             # for merging arrays
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R')
+##########################################################################################
+# User's settings                                                                        #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional"
+workdir <- work.dir # a few plotting blocks below still refer to `workdir`
+
+var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+#my.lat <- 55.3197120
+#my.lon <- 0.5625
+
+my.months=1:12 #9:12 # choose the month(s) to plot and save
+
+# DY European areas, in the format c(lon.min, lon.max, lat.min, lat.max)
+# (lon.min MUST be a negative number; data longitude values MUST be between 0 and 360,
+# and lat values MUST go from 90 to -90 degrees):
+
+# all the World:
+area0 <- c(-180,180,-90,90)
+# Northern Europe:
+area1 <- c(-15,45,45,75)
+# Southwestern Europe:
+area2 <- c(-15,20,35,45)
+# Southeastern Europe:
+area3 <- c(20,45,35,45)
+# All Europe:
+area4 <- c(-15,15,35,75)
+
+# Choose one region among those defined above:
+area <- area0
+
+# position of lon values of Europe only (without the Atlantic Sea and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+lon.min <- 360 + area[1]
+lon.max <- area[2]
+lat.min <- area[3]
+lat.max <- area[4]
+
+# choose one or more skill scores to visualize among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr:
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr')
+
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+##########################################################################################
+# visualize worldwide monthly maps of skill scores                                       #
+##########################################################################################
+
+for(month in my.months){
+  #month=1 # for the debug
+
+  load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=='FairRpss')  my.score<-my.FairRpss.chunk
+    if(my.score.name.map=='FairCrpss') my.score<-my.FairCrpss.chunk
+    if(my.score.name.map=="EnsCorr")   my.score<-my.EnsCorr.chunk
+    if(my.score.name.map=="RelDiagr")  my.score<-my.RelDiagr
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5))
+        #par(oma=c(0,0,4,0),mar=c(0,0,0,0))
+
+        #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+        #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+        #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+        n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+        #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F)
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+
+        png(file=paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png'),width=1000,height=600)
+
+        # Map:
+        par(fig=c(0,0.92,0,0.89), new=TRUE)
+        lons2 <- c(lons[300:512],lons[1:299]) # rotate the longitudes (hard-coded for a 512-point grid)
+        PlotEquiMap2(my.score[lead,,], lons2, lats, title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F)
+
+        if(my.score.name.map=="EnsCorr")   my.PValue.rev <- my.EnsCorr.pvalue < 0.05
+        if(my.score.name.map=="FairRpss")  my.PValue.rev <- my.FairRpss.pvalue > 0.05
+        if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue > 0.05
+
+        # Draw the significance diagonal lines:
+        pv <- aperm(my.PValue.rev, c(1,3,2))
+
+        #hatching(lats,lons, pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+
+        # Map title:
+        par(fig=c(0,1,0.81,0.91), new=TRUE)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.')
+        mtext(my.title, cex=2.2)
+
+        # Color legend:
+        par(fig=c(0.92,1,0,0.9), new=TRUE)
+        ColorBar(my.brk, cols=my.col, vert=T, label_scale=1.2)
+
+        ### arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 deg (lat) and from 0 deg (lon):
+        ### my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map == "RelDiagr") {
+
+        png(file=paste0(work.dir,'/Maps_',var.name,'/RelDiagr_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.png'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Middle Tercile','Upper Tercile'), lty=c(1,1,1), lwd=c(2.5,2.5,2.5), col=c('blue','darkgreen','red'))
+
+        dev.off()
+      }
+
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+#############################################################################
+# Visualize summary tables for a region                                     #
+#############################################################################
+array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4))
+
+area.lon <- c(1:which(lons >= lon.max)[1], (which(lons >= lon.min)[1]:length(lons))) # restrict area to continental Europe only
+area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to continental Europe only
+
+for(month in 1:12){
+
+  load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  for(l in 0:3){
+    #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+    array.EnsCorr[month, 1+l]   <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T)
+    array.FairRpss[month, 1+l]  <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T)
+    array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T)
+  }
+}
+
+
+#png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400)
+png(file=paste0(work.dir,'/',var.name,'/Summary_',var.name,'.png'),width=700,height=600)
+
+my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1)
+
+# create an array similar to array.pers but with colors instead of frequencies:
+plot.new()
+
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("Subseasonal sfcWind 1994-2013", cex=1.8)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.25, 0.8, 0.98), new=TRUE)
+mtext("EnsCorr", cex=1.5)
+
+par(mar=c(1,4,1,0), fig=c(0, 0.25, 0.1, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F)
+mtext(side = 1, text = "Leadtime", line = 2, cex=1.2)
+mtext(side = 2, text = "Startdate", line = 2, cex=1.2)
+axis(1, at=seq(0,3), las=1, cex.axis=1, labels=0:3, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+
+array.colors <- array(my.cols[8],c(12,4))
+array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7]
+array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6]
+array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5]
+array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4]
+array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3]
+array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2]
+array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1]
+
+for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p,
0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.30, 0.55, 0.8, 0.98), new=TRUE) +mtext("FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.30, 0.55, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 2, cex=1.2) +mtext(side = 2, text = "Startdate", line = 2, cex=1.2) +axis(1, at=seq(0,3), las=1, cex.axis=1, labels=0:3, mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.60, 0.85, 0.8, 0.98), new=TRUE) +mtext("FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.60, 0.85, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 2, cex=1.2) +mtext(side = 2, text = "Startdate", line = 2, cex=1.2) +axis(1, at=seq(0,3), las=1, cex.axis=1, labels=0:3, mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }} +#for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}} + +par(fig=c(0.88, 1, 0.1, 0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + +dev.off() + + + + +############################################################################################### +# Composite map of the four skill scores # +############################################################################################### + +# run it only when you have all the 4 skill scores for each month: + +my.months=1:12 # choose the month(s) to plot and save + +for(month in my.months){ + + startdate.name.map<-my.month[month] # i.e:"January" + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100) + + layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1)) + #layout.show(16) + + for(score in 1:4){ + 
for(lead in 1:n.leadtimes){ + + img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) + par(mar = rep(0, 4), xaxs='i', yaxs='i' ) + plot.new() + rasterImage(img, 0, 0, 1, 1, interpolate=FALSE) + + } # next lead + + } # next score + + dev.off() + +} # next month + + + + + + + + + + + + + + + + + + + + + +# Dani Map script for the small figure: +#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. + +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. 
anomalies
+
+ anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+ rm(data.hindcast,clim.hindcast)
+ gc()
+
+ # average the anomalies over the 4 hindcast members [1,20,4,320,640]
+ anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+ my.grid<-paste0('r',n.lon,'x',n.lat)
+ data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek',
+ obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+ nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+ # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+ # Mean of the 20 years (for each point and leadtime)
+ clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+ clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+ anom.rean <- data.rean$mod - clim.rean
+
+ anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+ #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+ save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+ save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+ save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+ save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+ rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+ gc()
+
+}
+
+
+##################################################################################
+#                                   Toy Model                                    #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+
+my.prob=c(1/3,2/3) # quantiles used for the FairRPSS and the RelDiagr (usually the terciles)
+
+N=80 # number of years
+M=14 # number of members
+
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
+
+obs<-convert2prob(anom.rean, prob=my.prob)
+#mean(obs[,1]) # check that it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind, prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement from the hindcast
+ens.clim<-convert2prob(anom.hind.clim, prob=my.prob)
+#mean(ens.clim[,1]) # check that it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on the observations instead (as applied by veriApply via convert2prob when the option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns() function above, it just has one extra column
+ens.clim.rean<-convert2prob(anom.rean.clim, prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
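+# quick sanity check of the convert2prob()/count2prob() pair (a minimal sketch;
+# 'toy.ens' is just an illustrative name): convert2prob() returns per-year counts
+# of members falling in each tercile category, and, as remarked just below,
+# count2prob() needs type=4 for the three probabilities of each year to sum to 1:
+toy.ens <- array(rnorm(10*3), c(10,3)) # 10 years x 3 members
+rowSums(count2prob(convert2prob(toy.ens, prob=c(1/3,2/3)), type=4)) # all equal to 1 with type=4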
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of the verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim as N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # same, but providing the observation-based climatological forecast
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without fcst.ref, so veriApply builds its own climatological forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim as N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
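+# analytic reference for the values above (a small sketch, assuming tercile
+# categories): a climatological forecast has cumulative probabilities (1/3, 2/3),
+# so its expected RPS is sum_k p_k*(1-p_k) = 2/9 + 2/9 = 4/9 = 0.444..., the value
+# that FairRps.clim approaches for large M and N (cf. FairRps.clim.stable above):
+p.cum <- c(1/3, 2/3) # cumulative probabilities of the first two categories
+sum(p.cum*(1-p.cum)) # 0.4444444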
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in the case of forecasts, or the number of years in the case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # a very interesting use of aperm(): not only to swap dimensions, but also to merge two dimensions into a single one (see the short demo after this section)
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
+
+# when you have, for each leadtime, all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+ main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+ xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load the obs. data for the four daily (6-hourly) time steps:
+for(year in yr1.hind:yr2.hind){
+ data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+ obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+ leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted')
+
+ my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
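+# small demo of the aperm() trick mentioned above (a sketch with toy dimensions):
+# moving the target dimension first and then reshaping merges two dimensions into
+# one, column-major, without any explicit loop:
+x.demo <- array(1:24, c(2,3,4))
+y.demo <- array(aperm(x.demo, c(3,1,2)), c(4, 2*3)) # dim 3 first, then merge dims 1 and 2
+identical(y.demo[,3], x.demo[1,2,]) # TRUE: column 3 corresponds to i=1, j=2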
+##### Calculate the reanalysis climatology loading only 1 file out of every 4 (it's quick, but it needs all files to be available beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only one startdate out of every four, plus the last 3 startdates, which must be computed individually because they do not form a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+ data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+ obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+ nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+ clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+ # the intermediate startdates between startdate week i and startdate week i+4 are filled in by shifting leadtimes:
+ if(startdate <= 49){
+ clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate  leadtime (week)
+ clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week)     1 2 3 4
+ clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] #   1        a b c d  <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+ clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] #   2        b c d e
+ clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] #   3        c d e f
+ clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] #   4        d e f g
+ }                                                      #   5        e f g h
+
+ if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+ clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+ clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+ clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+ clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+ clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+ clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+ }
+
+}
+
+
+ # You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+ # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+ # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+ C_cor<-get("C_cor", asNamespace("stats"))
+ test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+ my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
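+ # a safer alternative (sketch with illustrative dimensions): cor() applied to two
+ # matrices already computes all column-pair correlations in compiled code, so the
+ # pointwise correlations are just the diagonal, with no explicit loop:
+ m1 <- matrix(rnorm(100*50), 100, 50) # 100 time steps x 50 grid points
+ m2 <- matrix(rnorm(100*50), 100, 50)
+ r.point <- diag(cor(m1, m2)) # one correlation per grid point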
+#######################################################################################
+#                                 Raster Animations                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+ j <- pngobj
+ # If there is an alpha channel, remove it...
+ # it might be better to just adjust the mat matrix
+ if(dim(j)[3] > 3){
+ j <- j[,,1:3]
+ }
+ k <- apply(j, c(1,2), c)
+ mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+ grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+ clust <- kmeans(mat, nclust, iter.max = 20)
+ new <- clust$centers[clust$cluster,]
+
+ for(i in 1:3){
+ tmp <- matrix(new[,i], nrow = dim(j)[1])
+ j[,,i] <- tmp
+ }
+
+ plot.new()
+ rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+ j <- readPNG(file)
+ for(i in 1:n){
+ clustPNG(j, i)
+ mtext(paste("Number of clusters:", i))
+ }
+
+ j <- readPNG(file)
+ plot.new()
+ rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+ mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+ Sys.sleep(0.2)
+ # Dirk says using cat() like this is naughty ;-)
+ #cat(i,"\r")
+ # So you can use message() like this, thanks to Sharpie's
+ # comment to use appendLF=FALSE.
+ message(i,"\r",appendLF=FALSE)
+ flush.console()
+}
+
+
+for(i in 1:10) {
+ Sys.sleep(0.2)
+ # Dirk says using cat() like this is naughty ;-)
+ cat(i,"\r")
+}
+
+
+#######################################################################################
+#                            Load large datasets with ff                              #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # 'list' takes the object names as strings
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # tercile thresholds
+close(anom.hind);rm(anom.hind)
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stand-in for an array that doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# wrapper sketch ('skillscore.from.disk' is a placeholder name, assuming the wrapper
+# just takes the scratch path): reload the ff-backed hindcast, compute one skill
+# score and clean up:
+skillscore.from.disk <- function(my.scratch){
+ ffload(file=my.scratch)
+ open(anom.hind)
+ my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+ close(anom.hind)
+ my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stand-in for an array that doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
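+# A minimal end-to-end sketch of the ff workflow experimented with above ('demo.path'
+# is a hypothetical scratch file; names and sizes are illustrative only):
+demo.path <- "/scratch/Earth/ncortesi/ffdemo"
+x <- ff(rnorm(2*3*4), dim=c(2,3,4), vmode="double", filename=demo.path) # disk-backed array
+ffsave(x, file=demo.path) # saves an .RData image plus the .ff data files
+close(x); rm(x)
+ffload(file=demo.path, list="x") # restores the ff object 'x'
+open(x)
+mean(x[,,1]) # subscripting reads only the needed block from disk
+close(x)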
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # stand-in for an array that doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case, but not in the row below! (you must use do.call; see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the ff package (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+ month=1 # for debug
+ my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+ startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+ n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+ print(paste0("Computing Skill Scores for ",startdate.name))
+
+ if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # the reanalysis data is not converted to ff because it is a small array
+
+ if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+ if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+ if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+ for(startdate in my.startdates){
+
+ pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+ my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+ load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+ if(!boot) {
+ anom.rean<-drop(anom.rean)
+ anom.rean.big[my.time.interv,,,] <- anom.rean
+ }
+
+ if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+ load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+ anom.hindcast<-drop(anom.hindcast)
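+ # note on the layout below: when bootstrapping, the reanalysis is appended as
+ # one extra "member" (position n.members+1) of anom.hind.big, so that resampling
+ # whole years later keeps each forecast paired with its verifying observation: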
+ if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+ if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+ rm(anom.hindcast, anom.rean)
+ }
+ gc()
+ }
+
+ if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+ if(!boot) {
+ if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+ prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+ if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+ } else {
+ # bootstrapping:
+ my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+ if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+ for(b in 1:n.boot){
+ print(paste0("resampling n. ",b,"/",n.boot))
+ yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+ if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled"))
+ if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon))
+
+ if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # the hindcasts need to be reloaded if b>1
+
+ for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff it takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m)
+
+ if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands
+
+ # a faster way, which will be able to replace the above one when the ffapply output parameters will be improved:
+ #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes only 3m
+ #aa<-abind(a[1:10], along=4) # it doesn't fit into memory!
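+ # each replicate b below recomputes the scores on the resampled years; the
+ # 5-95% percentiles across the n.boot replicates (taken after this loop)
+ # give the confidence intervals of the two skill scores: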
+
+ if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+ obs=anom.hind.big.sampled[n.members+1,,,,],
+ prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss
+
+ if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+ obs=anom.hind.big.sampled[n.members+1,,,,],
+ tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss
+
+ if(bigdata) delete(anom.hind.big.sampled)
+ rm(anom.hind.big.sampled) # delete the sampled hindcast
+
+ }
+
+ # calculate the 5th and 95th percentiles of the skill scores:
+ if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+ if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- apply(my.FairCrpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+
+ } # close if on boot
+
+ }
+
+ if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts
+ rm(anom.hind.big)
+ rm(anom.rean.big)
+ gc()
+
+ if(!boot){
+ if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+ if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+ } else {
+ if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+ if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+ }
+
+ if(exists("my.FairRpss")) rm(my.FairRpss); if(exists("my.FairCrpss")) rm(my.FairCrpss)
+ if(exists("my.FairRpssBoot")) rm(my.FairRpssBoot); if(exists("my.FairCrpssBoot")) rm(my.FairCrpssBoot)
+ if(exists("my.FairRpssConf")) rm(my.FairRpssConf); if(exists("my.FairCrpssConf")) rm(my.FairCrpssConf)
+
+} # next m (month)
+
+
+#########################################################################################
+#      Calculate and save the FairRpss for a chosen period using ff and Load()          #
+#########################################################################################
+
+ anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+ anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+ for(startdate in my.startdates){
+ pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+ my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+ #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+ #anom.rean<-drop(anom.rean)
+ #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+ my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+ anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+ obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+ output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+ #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+ #anom.hindcast<-drop(anom.hindcast)
+
+ anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+ rm(anom.hindcast)
+ }
+
+ my.FairRpss <-
veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + + +######################################################################################### +# Alternative way to speed up Load() # +######################################################################################### + +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset + var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to find if it is WTs-driven + var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + + # Forecast system list: + S4 <- list(path = ...) + CFSv2 <- list(path = ...) + + # Reanalysis list: + ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + JRA55 <- list(path = ...) + + + + + # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation): + exp.source <- NULL + obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + + # Select one variable and its frequency: + var.name <- 'sfcWind' + store.freq <- 'daily' + + # Extract only the directory path of the forecast and reanalysis: + obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path) + obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2) + obs.source4 <- strsplit(obs.source3,'/') + obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/") + + obs.source6 <- list.files(path = obs.source5) + + system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',STORE_FREQ,'_mean/',VAR_NAME,'_f6h')) + system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc") + + y <- 1990 + var <- Load(var = VAR_NAME, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat') + + +} # close if on mare + + diff --git a/old/SkillScores_map_v2.R~ b/old/SkillScores_map_v2.R~ new file mode 100644 index 0000000000000000000000000000000000000000..918ab02af75d501edc3fe8be3ffc6df89cc5df28 --- /dev/null +++ b/old/SkillScores_map_v2.R~ @@ -0,0 +1,968 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # 
+########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# DY European areas: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 and 360, and +# lat values MUST be from 90 to -90 degrees) +# Northern Europe: +area1 <- c(-15,45,45,75) +# Southwestern Europe: +area2 <- c(-15,20,35,45) +# Southeastern Europe: +area3 <- c(20,45,35,45) + +# Choose one region between those defined above: +area <- area1 + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +# choose one or more skills scores to visualize between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## + +for(month in my.months){ + #month=1 # for the debug + + for(my.score.name.map in my.score.name){ + #my.score.name.map='EnsCorr' # for the debug + + if(my.score.name.map=='FairRpss') { load(file=paste0(work.dir,'/',var.name,'/FairRpss_',my.month[month],'.RData')); my.score<-my.FairRpss.chunk } + if(my.score.name.map=='FairCrpss') { load(file=paste0(work.dir,'/',var.name,'/FairCrpss_',my.month[month],'.RData')); my.score<-my.FairCrpss.chunk } + if(my.score.name.map=="EnsCorr") { load(file=paste0(work.dir,'/',var.name,'/EnsCorr_',my.month[month],'.RData')); my.score<-my.EnsCorr.chunk } + if(my.score.name.map=="RelDiagr") { load(file=paste0(work.dir,'/',var.name,'/RelDiagr_',my.month[month],'.RData')); my.score<-my.RelDiagr } + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss<-c(seq(-1,1,by=0.1)) + 
col.rpss<-colorRampPalette(col)(length(brk.rpss)-1) + + my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differenciate between indexes + my.col<-col.rpss + + for(lead in 1:n.leadtimes){ + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map!="RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + png(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600) + + layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2)) + par(oma=c(1,1,4,1)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + PlotEquiMap(my.score[lead,,], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + title(my.title,line=0.5,outer=T) + + ColorBar(my.brk, cols=my.col, vert=T, cex=1.4) + + if(my.score.name.map=="EnsCorr"){ #add a small grey point if the corr.is significant: + + # arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr (lon): + my.PValue.rev <- my.PValue.chunk < 0.05 + #my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + } + #dev.off() + + if(my.score.name.map=="FairRpss"){ #add a small grey point if the corr.is significant: + # arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr (lon): + my.PValue.rev <- my.FairRpss.pvalue > 0.05 + #my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] #my.Pvalue.rev <- matrix(TRUE,n.lon,n.lat) + + } + + if(my.score.name.map=="FairCrpss"){ #add a small grey point if the corr.is significant: + + # arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr (lon): + my.PValue.rev <- my.FairCrpss.pvalue > 0.05 + #my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + } + + # Draw the significance diagonal lines: + hatching(lats,lons, my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + dev.off() + + } # close if on !RelDiagr + + if(my.score.name.map == "RelDiagr") { + + png(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600) + + my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + } + + + } # next lead + + } # next score + +} # next month + +############################################################################# +# Visualize summary tables for a region # +############################################################################# +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +for(p in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'/EnsCorr_',my.period[p],'.RData')) # for my.EnsCorr.chunk + load(file=paste0(work.dir,'/',var.name,'/FairRpss_',my.period[p],'.RData')) # for my.FairRpss.chunk + load(file=paste0(work.dir,'/',var.name,'/FairCrpss_',my.period[p],'.RData')) # for my.FairCrpss.chunk + + area.lon <- c(1:which(lons >= lon.max)[1], (which(lons >= lon.min)[1]:length(lons))) # restrict area to continental europe only + area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to continental europe only + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[p, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[p, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[p, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +# Impact summary: + +#png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) +png(file=paste0(workdir,'/Summary_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600) + +my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() +par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +mtext("Subseasonal sfcWind 1994-2013", cex=1.2) + +par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) +mtext("EnsCorr", cex=1.2) +par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 2, cex=1) +mtext(side = 2, text = "Forecast month", line = 2, cex=1) +axis(1, at=seq(0,3), las=1, cex.axis=1, labels=3:0, mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- 
my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}} + +#for(i in 1:12){ for(j in 0:3){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.EnsCorr.colors[i2,1+3-j])}} + +par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) +mtext("FairRPSS", cex=1.2) +par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 2, cex=1) +mtext(side = 2, text = "Forecast month", line = 2, cex=1) +axis(1, at=seq(0,3), las=1, cex.axis=1, labels=3:0, mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}} + +par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) +mtext("FairCRPSS", cex=1.2) +par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 2, cex=1) +mtext(side = 2, text = "Forecast month", line = 2, cex=1) +axis(1, at=seq(0,3), las=1, cex.axis=1, labels=3:0, mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}} + +dev.off() + + + + +############################################################################################### +# Composite map of the four skill scores # +############################################################################################### + +# run it only when you have all the 4 skill scores for each month: + +my.months=1:12 # choose the month(s) to plot and save + +for(month in my.months){ + + startdate.name.map<-my.month[month] # i.e:"January" + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100) + + layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), 
heights=c(1,1,1,1)) + #layout.show(16) + + for(score in 1:4){ + for(lead in 1:n.leadtimes){ + + img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) + par(mar = rep(0, 4), xaxs='i', yaxs='i' ) + plot.new() + rasterImage(img, 0, 0, 1, 1, interpolate=FALSE) + + } # next lead + + } # next score + + dev.off() + +} # next month + + + + + + + + + + + + + + + + + + + + + +# Dani Map script for the small figure: +#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. + +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. 
anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + +obs<-convert2prob(anom.rean,prob<-my.prob) +#mean(obs[,1]) # check if it is equal to 0.333 for K=3 + +ens<-convert2prob(anom.hind,prob<-my.prob) + +# climatological forecast: +anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample with no replacement of the hindcast +ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob) +#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4) + +# alternative definition of climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing): +#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross validation +anom.rean.clim<-t(array(anom.rean,c(N,N))) # function used by convert2prob when ref is not specified; it is ~ equal to the above function ClimEns, it only has a column more +ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob.table based on the climatological ensemble of observations + +p<-count2prob(ens) # we should add the option type=4, in not the sum for each year is not 100%!!! 
+y<-count2prob(obs) # we should add the option type=4, in not the sum for each year is not 100%!!! +ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F) + +### computation of verification scores: + +Rps<-round(mean(EnsRps(ens,obs)),4) +FairRps<-round(mean(FairRps(ens,obs)),4) +#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of Rps +#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of FairRps + +Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4) +FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4) +FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4) + +#FairRps.clim.stable<-0.44444444 +#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of Rps.clim +#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim + +Rpss<-round(1-(Rps/Rps.clim),4) +#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes) +Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs +system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast +#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # sale igual a ~0.45 per ogni hindcast; é diverso sia da Rps.clim che da Rps.clim per N-> +oo (0.555) + +Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs +Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs + +FairRpss<-round(1-(FairRps/FairRps.clim),4) +#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes) +#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps +#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast +#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # sale igual a ~0.55; é diverso sia da FairRps.clim che da FairRps.clim per N-> +oo (0.444) + +cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n')) + +#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n')) + +#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n')) + +#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim)) + +} + +# Crpss: + +#Crps<-round(mean(EnsCrps(ens,obs)),4) 
+#FairCrps<-round(mean(FairCrps(ens,obs)),4) + +#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4)) +#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4) + +#Crpss<-round(1-(Crps/Crps.clim),4) +#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) + +#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4) +#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) + +} + + +################################################################################## +# Other analysis # +################################################################################## + +my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together + +#tm<-toyarray(dims=c(32,64),N=20,nens=4) # in the number of startdates in case of forecasts or the number of years in case of hindcasts +#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20] +# +#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs) +#f.me <- veriApply('EnsMe', tm$fcst, tm$obs) +#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs) +#str(f.crps) # [32,64,20] + +#cst <- array(rnorm(prod(4:8)), 4:8) +#obs <- array(rnorm(prod(4:7)), 4:7) +#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs) +#dim(f.me) +#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm not only to swap dimensions, but also to merge two dimensions in the same one! +#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) +cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 
+##### Calculate the Reanalysis climatology loading only 1 file every 4 (it's quick, but it needs all files to be available beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate every 4, plus the last 3 startdates, which must be computed individually because they do not form a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+               obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+               nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){                                     # startdate \ leadtime (week):  1 2 3 4
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,]  #     1                         a b c d  <- only startdates 1 and 5 are necessary to fill all startdates between 1 and 5
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,]  #     2                         b c d e
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,]  #     3                         c d e f
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,]  #     4                         d e f g
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,]  #     5                         e f g h
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,]
+  }
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                                 Raster Animations                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
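+# Minimal sketch of the colour-quantization idea used by clustPNG() below, on a tiny
+# synthetic image so it runs without any PNG file (all names and sizes are toy values):
+# cluster the RGB triplets with kmeans() and replace every pixel by its cluster centre.
+img <- array(runif(4*5*3), c(4,5,3))              # toy RGB image in [0,1]
+rgb.mat <- apply(img, 3, c)                       # one row per pixel, one column per channel
+cl <- kmeans(rgb.mat, centers=2)
+img.q <- array(cl$centers[cl$cluster,], dim(img)) # quantized image, same shape as the input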
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component, remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+
+
+}
+
+
+
+#######################################################################################
+#                           Load large datasets with ff                               #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # note: list= takes the object *names* as a character vector
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# (sketch: the original signature of this wrapper was garbled; reconstructed here as a
+# small function that reloads the saved anomalies from disk and computes the skill score)
+compute.SkillScore.big <- function(my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
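+# Minimal ff save/load round trip (a hedged sketch; the tempfile()/tempdir() paths and
+# the name x are illustrative). ffsave()/ffload() work with object *names*:
+x <- ff(rnorm(10), dim=c(2,5), vmode="double", filename=tempfile())
+ffsave(x, file=file.path(tempdir(),"xstore"))        # writes xstore.ffData + xstore.RData
+close(x); rm(x)
+ffload(file=file.path(tempdir(),"xstore"), list="x") # restores the ff object 'x'
+open(x)
+x[1,1]                                               # data is read back from disk on demand
+close(x)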
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # toy reanalysis data; unlike the hindcast, it is small enough to stay in RAM
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the ff package (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    anom.rean<-drop(anom.rean) # dropped in both cases: the bootstrap branch below binds anom.rean to the (already dropped) hindcast
+    if(!boot) anom.rean.big[my.time.interv,,,] <- anom.rean
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
+        yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+        if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled"))
+        if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon))
+
+        if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts again if b>1
+
+        for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff it takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m)
+
+        if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands
+
+        # faster way that will be able to replace the above one when the ffapply output parameters will be improved:
+        #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only
+        #aa<-abind(a[1:10], along=4) # it doesn't fit into memory!
+
+        if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                 obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                 prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss
+
+        if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                   obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                   tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss
+
+        if(bigdata) delete(anom.hind.big.sampled)
+        rm(anom.hind.big.sampled) # delete the sampled hindcast
+
+      }
+
+      # calculate the 5 and 95 percentiles of the skill scores:
+      if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+      if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- apply(my.FairCrpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+
+    } # close if on boot
+
+  }
+
+  if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts
+  rm(anom.hind.big)
+  rm(anom.rean.big)
+  gc()
+
+  if(!boot){
+    if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  } else {
+    if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+
+  if(exists("my.FairRpss")) rm(my.FairRpss); if(exists("my.FairCrpss")) rm(my.FairCrpss)
+  if(exists("my.FairRpssBoot")) rm(my.FairRpssBoot); if(exists("my.FairCrpssBoot")) rm(my.FairCrpssBoot)
+  if(exists("my.FairRpssConf")) rm(my.FairRpssConf); if(exists("my.FairCrpssConf")) rm(my.FairCrpssConf)
+
+} # next m (month)
+
+
+#########################################################################################
+#     Calculate and save the FairRpss for a chosen period using ff and Load()           #
+#########################################################################################
+
+  anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) # NB: left unfilled below, because the corresponding load() calls are commented out
+  anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+  for(startdate in my.startdates){
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    #anom.rean<-drop(anom.rean)
+    #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+    my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+    data.hind <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                    obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                    output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+    #anom.hindcast<-drop(anom.hindcast)
+
+    anom.hindcast <- drop(data.hind$mod) # assumption: take the hindcast straight from the Load() output (the original assigned the Load() result to anom.rean, leaving anom.hindcast undefined)
+    anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+    rm(anom.hindcast)
+  }
+
+  my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+  save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
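+# Toy sketch of the bootstrap logic used in the FairRpss/FairCrpss section above:
+# resample the year dimension with replacement, recompute the score each time, and take
+# the 5-95% quantiles (type=8) as confidence interval. score.fun and all sizes below
+# are illustrative stand-ins, not the real skill-score machinery.
+set.seed(1)
+n.yrs.toy <- 20; n.boot.toy <- 100
+x.toy <- rnorm(n.yrs.toy)        # stand-in for the per-year score input
+score.fun <- function(x) mean(x) # stand-in for a real skill score
+boot.scores <- replicate(n.boot.toy, score.fun(x.toy[sample(1:n.yrs.toy, replace=TRUE)]))
+quantile(boot.scores, probs=c(0.05,0.95), type=8) # as for my.FairRpssConf above
+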
+#########################################################################################
+#                       Alternative way to speed up Load()                              #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+ var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to test for being WT-driven
+ var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+ # Forecast system list:
+ S4 <- list(path = ...)
+ CFSv2 <- list(path = ...)
+
+ # Reanalysis list:
+ ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ JRA55 <- list(path = ...)
+
+
+
+
+ # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+ exp.source <- NULL
+ obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+ # Select one variable and its frequency:
+ var.name <- 'sfcWind'
+ store.freq <- 'daily'
+
+ # Extract only the directory path of the forecast and reanalysis:
+ obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+ obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+ obs.source4 <- strsplit(obs.source3,'/')
+ obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+ obs.source6 <- list.files(path = obs.source5)
+
+ system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+ system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+ y <- 1990
+ var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+#} # close if on mare (leftover closing brace: there is no matching 'if' in this snippet)
+
+
diff --git a/old/SkillScores_map_v3.R b/old/SkillScores_map_v3.R
new file mode 100644
index 0000000000000000000000000000000000000000..665637561d8a7ccaec9778f9efdcc5799865289d
--- /dev/null
+++ b/old/SkillScores_map_v3.R
@@ -0,0 +1,425 @@
+###########################################################################################
+#          Visualize and save the score maps for one score and period                    #
+###########################################################################################
+# run it only after computing and saving all the skill score data:
+
+rm(list=ls())
+
+##########################################################################################
+#                          Load functions and libraries                                  #
+##########################################################################################
+
+library(SpecsVerification) # for skill scores
+library(s2dverification) # for Load() function
+library(abind) # for merging arrays
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R')
+##########################################################################################
+#                                 User's settings                                        #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional"
+
+var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+#my.lat <- 55.3197120
+#my.lon <- 0.5625
+
+my.months=1:12 #9:12 # choose the month(s) to plot and save
+
+# choose one or more skill scores to visualize among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr:
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr')
+
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+##########################################################################################
+#                 visualize worldwide monthly maps of skill scores                       #
+##########################################################################################
+
+for(month in my.months){
+  #month=1 # for the debug
+  print(paste0("Month=",month))
+
+  # Load data:
+  load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    print(paste0("Score=",my.score.name.map))
+
+    if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk
+    if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk
+    if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk
+    if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+      print(paste0("Leadtime=",lead))
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map!="RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + #par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png') + png(file=my.file,width=1000,height=600) + + # Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue > 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue > 0.05 + + # Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + if(my.score.name.map=="EnsCorr") hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + # Map title: + par(fig=c(0,1,0.81,0.91), new=TRUE) + #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-S4 / ", var.name.map, " / ", my.score.name.map, "\n", my.month[month], " / 1994-2013") + mtext(my.title, cex=2.2) + + # Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk) + + ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Reference dataset: ERA-Interim \nLead time: ", leadtime.week[lead], " days' ", my.file," ", my.file2)) + + } # close if on !RelDiagr + + if(my.score.name.map == "RelDiagr") { + + my.file <- paste0(work.dir,'/',var.name,'/RelDiagr_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/RelDiagr_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.png') + + png(file=my.file,width=600,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \n", my.month[month], " / 1994-2013") + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title, cex.main=1.6) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Reference dataset: ERA-Interim \nLead time: ", leadtime.week[lead], " days' ", my.file," ", my.file2)) + } + + } # next lead + + } # next score + +} # next month + + +############################################################################# +# Visualize summary tables for a region # +############################################################################# + +# DY European areas: (in the format: lon.min /lon.max / lat.min /lat.max. 
lon.min MUST be a NEGATIVE number, long values MUST be from 0 and 360, and +# lat values MUST be from 90 to -90 degrees) + +# all the World: +area0 <- c(-180,180,-90,90) +# Northern Europe: +area1 <- c(-15,45,45,75) +# Southwestern Europe: +area2 <- c(-15,20,35,45) +# Southeastern Europe: +area3 <- c(20,45,35,45) +# All Europe: +area4 <- c(-15,45,35,75) +# North America: +area5<-c(-130,-60,30,50) + +# Choose one region between those defined above: +area <- area4 + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +########################################################################## + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load on file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +area.lon <- c(1:which(lons >= lon.max)[1], (which(lons >= lon.min)[1]:length(lons))) # restrict area to continental europe only +area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to continental europe only + +# Load all data and average it over the chosen region: +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +#png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) +png(file=paste0(work.dir,'/',var.name,'/Summary_',var.name,'.png'),width=700,height=500) + +my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() + +par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,4,0), fig=c(0.07, 0.27, 0.80, 1), new=TRUE) +mtext("EnsCorr", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }} + 
+par(mar=c(0,0,4,0), fig=c(0.32, 0.57, 0.8, 1), new=TRUE)
+mtext("FairRPSS", cex=1.5)
+
+par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F)
+mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2)
+mtext(side = 2, text = "Startdate", line = 3, cex=1.2)
+axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+
+array.colors <- array(my.cols[8],c(12,4))
+array.colors[array.FairRpss < my.seq[8]] <- my.cols[7]
+array.colors[array.FairRpss < my.seq[7]] <- my.cols[6]
+array.colors[array.FairRpss < my.seq[6]] <- my.cols[5]
+array.colors[array.FairRpss < my.seq[5]] <- my.cols[4]
+array.colors[array.FairRpss < my.seq[4]] <- my.cols[3]
+array.colors[array.FairRpss < my.seq[3]] <- my.cols[2]
+array.colors[array.FairRpss < my.seq[2]] <- my.cols[1]
+
+for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }}
+
+par(mar=c(0,0,4,0), fig=c(0.62, 0.87, 0.8, 1), new=TRUE)
+mtext("FairCRPSS", cex=1.5)
+
+par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F)
+mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2)
+mtext(side = 2, text = "Startdate", line = 3, cex=1.2)
+axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+
+array.colors <- array(my.cols[8],c(12,4))
+array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7]
+array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6]
+array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5]
+array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4]
+array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3]
+array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2]
+array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1]
+
+for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }}
+#for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}}
+
+par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE)
+#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+
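+# The three colour cascades above can be collapsed into a single call with findInterval(),
+# which is equivalent for the same breaks (a sketch, NA handling aside; score.to.col is a
+# new helper name, not used elsewhere):
+score.to.col <- function(x, brks=my.seq, cols=my.cols){
+  array(cols[findInterval(x, brks, all.inside=TRUE)], dim(x))
+}
+# e.g. array.colors <- score.to.col(array.EnsCorr)
+
+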
+##############################################################
+# Only monthly reliability diagrams, all over the same graph #
+##############################################################
+
+for(month in my.months){
+  #month=1 # for the debug
+  print(paste0("Month=",month))
+
+  # Load data:
+  load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude
+
+  my.score.name.map <- "RelDiagr" # in this section only the reliability diagrams are drawn (the original line used '==' instead of '<-')
+  #my.score.name.map='EnsCorr' # for the debug
+
+  print(paste0("Score=",my.score.name.map))
+
+  my.score <- my.RelDiagr
+
+  # old green-red palette:
+  # brk.rpss<-c(-1,seq(0,1,by=0.05))
+  # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+  # brk.rps<-c(seq(0,1,by=0.1),10)
+  # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+  # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+  # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+  brk.rpss<-c(seq(-1,1,by=0.1))
+  col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+  my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+  my.col<-col.rpss
+
+  for(lead in 1:n.leadtimes){
+    print(paste0("Leadtime=",lead))
+
+    #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+    #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+    if(my.score.name.map!="RelDiagr"){ # (never entered in this section; kept from the map-plotting loop above)
+
+      #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+      #           paper="special",width=12,height=7,horizontal=F)
+
+      #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5))
+      #par(oma=c(0,0,4,0),mar=c(0,0,0,0))
+
+      #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+      #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+      #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+      #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+      #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F)
+      # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees:
+
+      my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png')
+      my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png')
+      png(file=my.file,width=1000,height=600)
+
+      # Map:
+      par(fig=c(0,0.92,0,0.89), new=TRUE)
+
+      PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F)
+
+      if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05
+      if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue > 0.05
+      if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue > 0.05
+
+      # Draw the significance diagonal lines:
+      pv <- aperm(my.PValue.rev, c(1,3,2))
+
+      if(my.score.name.map=="EnsCorr") hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+
+      # Map title:
+      par(fig=c(0,1,0.81,0.91), new=TRUE)
+      #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.')
+      my.title <- paste0("ECMWF-S4 / ", var.name.map, " / ", my.score.name.map, "\n", my.month[month], " / 1994-2013")
+      mtext(my.title, cex=2.2)
+
+      # Color legend:
+      par(fig=c(0.92,1,0,0.9), new=TRUE)
+      ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk)
+
+      ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Reference dataset: ERA-Interim \nLead time: ", leadtime.week[lead], " days' ", my.file," ", my.file2)) + + } # close if on !RelDiagr + + if(my.score.name.map == "RelDiagr") { + + my.file <- paste0(work.dir,'/',var.name,'/RelDiagr_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/RelDiagr_',my.month[month],'_Forecast_time_',leadtime.week[lead],'_',var.name,'.png') + + png(file=my.file,width=600,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \n", my.month[month], " / 1994-2013") + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title, cex.main=1.6) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Reference dataset: ERA-Interim \nLead time: ", leadtime.week[lead], " days' ", my.file," ", my.file2)) + } + + } # next lead + +} # next month + + + + + + + + diff --git a/old/SkillScores_map_v3.R~ b/old/SkillScores_map_v3.R~ new file mode 100644 index 0000000000000000000000000000000000000000..25d7ac7831ed8fa788bf8c336fdbce335b1335cc --- /dev/null +++ b/old/SkillScores_map_v3.R~ @@ -0,0 +1,294 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # 
forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# choose one or more skills scores to visualize between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## + +for(month in my.months){ + #month=1 # for the debug + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + + for(my.score.name.map in my.score.name){ + #my.score.name.map='EnsCorr' # for the debug + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss<-c(seq(-1,1,by=0.1)) + col.rpss<-colorRampPalette(col)(length(brk.rpss)-1) + + my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differenciate between indexes + my.col<-col.rpss + + for(lead in 1:n.leadtimes){ + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map!="RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + #par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + png(file=paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png'),width=1000,height=600) + + # Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue > 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue > 0.05 + + # Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats,lons, pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + # Map title: + par(fig=c(0,1,0.81,0.91), new=TRUE) + my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + mtext(my.title, cex=2.2) + + # Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + ColorBar(my.brk, cols=my.col, vert=T, label_scale=1.2) + + ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + } # close if on !RelDiagr + + if(my.score.name.map == "RelDiagr") { + + png(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.png'),width=600,height=600) + + my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + } + + + } # next lead + + } # next score + +} # next month + +############################################################################# +# Visualize summary tables for a region # +############################################################################# + +# DY European areas: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 and 360, and +# lat values MUST be from 90 to -90 degrees) + +# all the World: +area0 <- c(-180,180,-90,90) +# Northern Europe: +area1 <- c(-15,45,45,75) +# Southwestern Europe: +area2 <- c(-15,20,35,45) +# Southeastern Europe: +area3 <- c(20,45,35,45) +# All Europe: +area4 <- c(-15,45,35,75) +# North America: +area5<-c(-130,-60,30,50) + +# Choose one region between those defined above: +area <- area4 + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +########################################################################## + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load on file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +area.lon <- c(1:which(lons >= lon.max)[1], (which(lons >= lon.min)[1]:length(lons))) # restrict area to continental europe only +area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to continental europe only + +# Load all data and average it over the chosen region: +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +#png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) +png(file=paste0(work.dir,'/',var.name,'/Summary_',var.name,'.png'),width=700,height=500) + +my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) + +# create an array similar to array.pers but with colors instead of frequencies: 
+plot.new() + +par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,4,0), fig=c(0.07, 0.27, 0.80, 1), new=TRUE) +mtext("EnsCorr", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.32, 0.57, 0.8, 1), new=TRUE) +mtext("FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.62, 0.87, 0.8, 1), new=TRUE) +mtext("FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+p, 0.5+p, 0.5+p, -0.5+p), col=array.colors[p,1+l]) }} +#for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.colors[p,1+l])}} + +par(fig=c(0.89, 1, 0.1, 
0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + +dev.off() + + + + + + + + + + + + diff --git a/old/SkillScores_map_v4.R b/old/SkillScores_map_v4.R new file mode 100644 index 0000000000000000000000000000000000000000..90643ea5c3e4b9aad637da141d59f8261f4235c7 --- /dev/null +++ b/old/SkillScores_map_v4.R @@ -0,0 +1,464 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +# Define regions for averaging skill scores: (in the format: lon.min /lon.max / lat.min /lat.max. 
lon.min MUST be a NEGATIVE number, lon values MUST range from -180 to 360, and
+# lat values MUST be from 90 to -90 degrees)
+area0.name <- "World"
+area0 <- c(-180,180,-90,90)
+area1.name <- "Northern_Europe"
+area1 <- c(-15,45,45,75)
+area2.name <- "Southern_Europe"
+area2 <- c(-15,45,35,45)
+area3.name <- "Southwestern_Europe"
+area3 <- c(-15,20,35,45)
+area4.name <- "Southeastern_Europe"
+area4 <- c(20,45,35,45)
+area5.name <- "Europe"
+area5 <- c(-15,45,35,75)
+area6.name <- "North_America"
+area6 <- c(-130,-60,30,50)
+area7.name <- "North_Sea"
+area7 <- c(-4, 15, 50, 65)
+area8.name <- "Iberian_Peninsula"
+area8 <- c(-10, 4, 36, 44)
+area9.name <- "Canadian_region"
+area9 <- c(-114, -111.8, 49.6, 51.7)
+
+# Choose one region among those defined above for the reliability diagrams and for the summary tables:
+area <- area5
+area.name <- area5.name
+land.only <- FALSE # set it to TRUE to average only over land, FALSE otherwise [warning: the reliability diagram is computed ONLY over land+sea]
+
+# positions of the max and min lon and lat values of the chosen region:
+lon.min <- 360 + area[1]
+lon.max <- area[2]
+lat.min <- area[3]
+lat.max <- area[4]
+
+##########################################################################################
+#                   visualize worldwide monthly maps of skill scores                     #
+##########################################################################################
+# choose one or more skill scores to visualize among ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr:
+my.score.name <- 'RelDiagr' #c('FairRpss','FairCrpss','EnsCorr','RelDiagr')
+
+my.score.name.title <- c('FairRpss','FairCrpss','Correlation of the ensemble mean','Reliability Diagram') # names to be visualized in the graph titles
+
+for(month in my.months){
+  #month=1 # for the debug
+  print(paste0("Month=",month))
+
+  # Load data:
+  load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  sdates.seq <- weekly.seq(forecast.year,mes,day) # sequence of dates corresponding to all the Thursdays of the year
+  my.startdates.days <- as.integer(substr(sdates.seq[which(as.integer(substr(sdates.seq,5,6)) == month)],7,8)) #startdates.monthly[[month]] #c(1:5)
+  stringa <- paste0(my.startdates.days,collapse=",")
+
+  if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to the chosen one
+  if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1)
+
+  if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to the chosen one
+  if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) }
+
+  n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='Acc' # for the debug
+
+    print(paste0("Score=",my.score.name.map))
+
+    if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk
+    if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk
+    if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk
+    #if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss <- c(seq(-1,1,by=0.1))
+    col.rpss <- colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk <- brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col <- col.rpss
+
+    my.RelDiagr <- list()
+
+    for(lead in 1:n.leadtimes){
+      print(paste0("Leadtime=",lead))
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map != "RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5))
+        #par(oma=c(0,0,4,0),mar=c(0,0,0,0))
+
+        #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+        #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+        #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+        #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+        #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F)
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+
+        my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png')
+        my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png')
+        png(file=my.file,width=1000,height=600)
+
+        # Map:
+        par(fig=c(0,0.92,0,0.89), new=TRUE)
+
+        PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, title_scale=0.8, brks=my.brk, cols=my.col, axelab=F, filled.continents=FALSE, drawleg=F)
+
+        if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05
+        if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05
+        if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05
+
+        # Draw the significance diagonal lines:
+        pv <- aperm(my.PValue.rev, c(1,3,2))
+
+        hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE)
+
+        # Map title:
+        par(fig=c(0,1,0.82,0.91), new=TRUE)
+        #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.')
+        my.title <- paste0("ECMWF-MPS / ", var.name.map, " / ", my.score.name.title[which(my.score.name == my.score.name.map)], "\n", my.month[month], " (",leadtime.week[lead]," days lead time) / 1994-2013")
+        mtext(my.title, cex=2.4, font=2)
+
+        # Color legend:
+        par(fig=c(0.92,1,0,0.9), new=TRUE)
+        ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk)
+
+        ### arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr.
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nBias correction: none\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + } # close if on !RelDiagr + + gc() + + if(my.score.name.map == "RelDiagr") { + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_RelDiagr_',my.month[month],'_leadtime_',leadtime.week[lead],'_',var.name,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_RelDiagr_',my.month[month],'_leadtime_',leadtime.week[lead],'_',var.name,'.png') + + png(file=my.file,width=600,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / Reliability Diagram \n", my.month[month], " (", leadtime.week[lead]," days lead time) / 1994-2013") + + # Note that bins=5 should correspond to the number of hindcast members + 1 + my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,,], obs.chunk.prob[[lead]][int1,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,,], obs.chunk.prob[[lead]][int3,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title, cex.main=1.6) + + lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") + + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + + lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nRegion: global\nReference dataset: ERA-Interim \nBias correction: none' ", my.file," ", my.file2)) + } + + gc() + } # next lead + + gc() + } # next score + + gc() +} # next month + + +############################################################################# +# Visualize summary tables for a region # +############################################################################# + +# If you use a mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: +if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + #Coordinates repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 
0, 1) # assign 1 to land and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) # assign 1 to land and NA to sea to multiply it for the score values later +} + + +### plot a box with the chosen area (do it only once): +# +#png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) +#PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) +#dev.off() + +########################################################################## + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load one file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one +if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + +if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one +if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + +# Load all data and average it over the chosen region: +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + # remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + +my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'.png') +png(file=my.file,width=700,height=500) + +#my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +#my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') +my.cols <- c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + +#my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) +#my.seq <- c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) +my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() + +par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,4,0), fig=c(0.07, 0.27, 0.80, 1), new=TRUE) +mtext("ACC", cex=1.5) + 
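+# The colour assignment below maps each averaged score value to one of the 8 colours in
+# my.cols through repeated threshold tests. A minimal equivalent sketch with findInterval()
+# (same my.seq and my.cols as above; score2col is a hypothetical helper, not part of this
+# script, shown only to document what the threshold ladder computes):
+#
+# score2col <- function(x, breaks = my.seq, cols = my.cols) {
+#   idx <- findInterval(x, breaks[-c(1, length(breaks))]) + 1  # bin index in 1..length(cols)
+#   array(cols[idx], dim = dim(x))                             # keep the 12x4 shape
+# }
+# array.colors <- score2col(array.EnsCorr)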
+par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.32, 0.57, 0.8, 1), new=TRUE) +mtext("FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.62, 0.87, 0.8, 1), new=TRUE) +mtext("FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) +gc() + 
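+# Note on the three polygon() loops above: each one draws a rectangular cell per
+# (start date, lead time) pair, with row p plotted top-down at y = 13-p and column l
+# centred at x = l. An equivalent, slightly shorter sketch with rect() (same coordinate
+# convention, not run here):
+#
+# for(p in 1:12) for(l in 0:3) rect(l-0.5, 12.5-p, l+0.5, 13.5-p, col=array.colors[p,1+l])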
+dev.off()
+
+# Not checked but should work:
+#system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2))
+
+
+# Monthly reliability diagrams, all on the same graph                                    #
+# ONLY FOR SEA+LAND!!!!
+
+my.RelDiagr <- list()
+
+my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png')
+my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/Summary_RelDiagr_', area.name,'.png')
+
+#n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude
+
+png(file=my.file, width=1000, height=700)
+plot(0, type = "n", axes = FALSE, ann = FALSE)
+mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2)
+par(mar=c(0,0,0,0), fig=c(0, 1, 0, 0.4), new=TRUE)
+legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4)
+
+par(mar=c(4.8,3.8,3.8,3.8), fig=c(0, 1, 0, 0.4), new=TRUE)
+
+for(tercile in c(1,3)){
+  print(paste0("Tercile: ",tercile))
+
+  my.lead <- (1:n.leadtimes) + (tercile-1) * 4
+
+  for(lead2 in my.lead){
+    print(paste0("Lead time: ",lead2))
+
+    xmin <- 0.25 * ((lead2-1) %% 4)
+    xmax <- xmin + 0.25
+    ymin <- 0.10 + 0.38 * (tercile-1)/2
+    ymax <- ymin + 0.38
+
+    #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.')
+    #my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013")
+    my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days")
+
+    par(fig=c(xmin, xmax, ymin, ymax), new=TRUE)
+    mod.subtitle <- ifelse(tercile == 1, "lower","upper")
+    plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4)
+
+    for(month in c(1,4,7,10)){ #my.months){
+      #month=1 # for the debug
+      #print(paste0("Month=",month))
+
+      # Load data:
+      load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+      cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... \n'))
+
+      lead <- 1 + (lead2-1) %% 4
+
+      if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+      #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F)
+
+      if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F)
+
+      #col.month <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99')
+      #col.month <- c('#8dd3c7','#8dd3c7','#8dd3c7','#ffffb3','#ffffb3','#ffffb3','#bebada','#bebada','#bebada','#fb8072','#fb8072','#fb8072')
+      col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3')
+
+      if(tercile == 1) lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)], my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)], type="o", pch=16, col=col.month[month])
+
+      if(tercile == 3) lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)], type="o", pch=16, col=col.month[month])
+
+      rm(obs.chunk.prob, ens.chunk.prob)
+      gc()
+    } # next month
+
+  } # next lead
+} # next tercile
+
+
+dev.off()
+
+
+system(paste0("~/scripts/fig2catalog.sh -c 'Region: ", area.name, "\nReference dataset: ERA-Interim\nLead time: ", leadtime.week[lead], " days' ", my.file," ", my.file2))
+
+
+
diff --git a/old/SkillScores_map_v4.R~ b/old/SkillScores_map_v4.R~
new file mode 100644
index 0000000000000000000000000000000000000000..79fdf22dbede2104ad1bccfce87e96ef5b879097
--- /dev/null
+++ b/old/SkillScores_map_v4.R~
@@ -0,0 +1,448 @@
+###########################################################################################
+#            Visualize and save the score maps for one score and period                   #
+###########################################################################################
+# run it only after computing and saving all the skill score data:
+
+rm(list=ls())
+
+##########################################################################################
+#                            Load functions and libraries                                #
+##########################################################################################
+
+library(SpecsVerification) # for skill scores
+library(s2dverification)   # for Load() function
+library(abind)             # for merging arrays
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R')
+##########################################################################################
+#                                  User's settings                                       #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional"
+
+var.name <- 'sfcWind' # forecast variable.
It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# choose one or more skills scores to visualize between ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +# Define regions for averaging skill scores: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 and 360, and +# lat values MUST be from 90 to -90 degrees) +area0.name <- "World" +area0 <- c(-180,180,-90,90) +area1.name <- "Northern_Europe" +area1 <- c(-15,45,45,75) +area2.name <- "Southern_Europe" +area2 <- c(-15,45,35,45) +area3.name <- "Southwestern_Europe" +area3 <- c(-15,20,35,45) +area4.name <- "Southeastern_Europe" +area4 <- c(20,45,35,45) +area5.name <- "Europe" +area5 <- c(-15,45,35,75) +area6.name <- "North_America" +area6 <- c(-130,-60,30,50) +area7.name <- "North_Sea" +area7 <- c(-4, 15, 50, 65) +area8.name <- "Iberian_Peninsula" +area8 <- c(-10, 4, 36, 44) +area9.name <- "Canadian region" +area9 <- c(-114, -111.8, 49.6, 51.7) + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## + +for(month in my.months){ + #month=1 # for the debug + print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + + for(my.score.name.map in my.score.name){ + #my.score.name.map='Acc' # for the debug + + print(paste0("Score=",my.score.name.map)) + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + #if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss <- c(seq(-1,1,by=0.1)) + col.rpss <- colorRampPalette(col)(length(brk.rpss)-1) + + my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differenciate between indexes + my.col<-col.rpss + + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + print(paste0("Leadtime=",lead)) + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for 
',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map != "RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + #par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',lead,'.png') + png(file=my.file,width=1000,height=600) + + # Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05 + + # Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + # Map title: + par(fig=c(0,1,0.81,0.91), new=TRUE) + #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-S4 / ", var.name.map, " / ", my.score.name.map, "\n", my.month[month], " / 1994-2013") + mtext(my.title, cex=2.4) + + # Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk) + + ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start date: January to December\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + } # close if on !RelDiagr + + if(my.score.name.map == "RelDiagr") { + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_RelDiagr_',my.month[month],'_leadtime_',leadtime.week[lead],'_',var.name,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/RelDiagr_',my.month[month],'_leadtime_',leadtime.week[lead],'_',var.name,'.png') + + png(file=my.file,width=600,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \n", my.month[month], " / 1994-2013") + + # Note that bins=5 should correspond to the number of hindcast members + 1 + my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,,], obs.chunk.prob[[lead]][int1,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,,], obs.chunk.prob[[lead]][int3,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title, cex.main=1.6) + + lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") + + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + + lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Reference dataset: ERA-Interim \nLead time: ", leadtime.week[lead], " days' ", my.file," ", my.file2)) + } + + } # next lead + + } # next score + +} # next month + + +############################################################################# +# Visualize summary tables for a region # +############################################################################# + +# Choose one region between those defined above: +area <- area9 +area.name <- area9.name +land.only <- FALSE # set it to true if you want to average only over land, FALSE otherwise + +# If you use a mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: +if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + #Coordinates repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 0, 1) # assign 1 to land 
and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) # assign 1 to land and NA to sea to multiply it for the score values later +} + + +### plot a box with the chosen area (do it once): +png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) +PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) +dev.off() + +# position of max and min long and lat values the chosen region: +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +########################################################################## + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load on file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one +if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + +if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one +if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + +# Load all data and average it over the chosen region: + +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + # remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + +my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'.png') +png(file=my.file,width=700,height=500) + +#my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +#my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') +my.cols <- c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + +#my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) +#my.seq <- c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) +my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() + +par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) 
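+# Worked example of the area.lon selection used earlier in this script (a sketch, assuming
+# a hypothetical 10-degree grid with lons = seq(0, 350, 10)): choosing area5 (Europe,
+# c(-15,45,35,75)) gives lon.min = 345 and lon.max = 45, so the lon.min < 360 branch selects
+# columns 1:which(lons >= 45)[1] (0E to 50E) plus which(lons >= 345)[1]:36 (350E), stitching
+# together the two sides of the Greenwich meridian.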
+mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,4,0), fig=c(0.07, 0.27, 0.80, 1), new=TRUE) +mtext("ACC", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.32, 0.57, 0.8, 1), new=TRUE) +mtext("FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.62, 0.87, 0.8, 1), new=TRUE) +mtext("FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Leadtime", line = 3.5, cex=1.2) +mtext(side = 2, text = "Startdate", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) 
+ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) +gc() + +dev.off() + +# Not checked but whould work: +#system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2)) + + + +# Monthly reliability diagrams, all over the same graph # +# ONLY FOR SEA+LAND!!!! + +my.RelDiagr<-list() + +my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/Summary_RelDiagr_', area.name,'.png') + +#n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + +png(file=my.file, width=1000, height=700) +plot(0, type = "n", axes = FALSE, ann = FALSE) +mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2) +par(mar=c(0,0,0,0) , fig=c(0, 1, 0, 0.4), new=TRUE) +legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4) + +par(mar=c(4.8,3.8,3.8,3.8) , fig=c(0, 1, 0, 0.4), new=TRUE) + +for(tercile in c(1,3)){ + print(paste0("Tercile: ",tercile)) + + my.lead <- (1:n.leadtimes) + (tercile-1) * 4 + + for(lead2 in my.lead){ + print(paste0("Lead time: ",lead2)) + + xmin <- 0.25 * ((lead2-1) %% 4) + xmax <- xmin + 0.25 + ymin <- 0.10 + 0.38 * (tercile-1)/2 + ymax <- ymin + 0.38 + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + #my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013") + my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days") + + par(fig=c(xmin, xmax, ymin, ymax), new=TRUE) + mod.subtitle <- ifelse(tercile == 1, "lower","upper") + plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4) + + + for(month in c(1,4,7,10)){ #my.months){ + #month=1 # for the debug + #print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... 
\n')) + + lead <- 1 + (lead2-1) %% 4 + + if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + #col.month <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99') + #col.month <- c('#8dd3c7','#8dd3c7','#8dd3c7','#ffffb3','#ffffb3','#ffffb3','#bebada','#bebada','#bebada','#fb8072','#fb8072','#fb8072') + col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3') + + if(tercile == 1) lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + if(tercile == 3) lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + rm(obs.chunk.prob, ens.chunk.prob) + gc() + } # next month + + } # next lead +} # next tercile + + +dev.off() + + + + + +system(paste0("~/scripts/fig2catalog.sh -c 'Region: Reference dataset: ERA-Interim \nLead time: ", leadtime.week[lead], " days' ", my.file," ", my.file2)) + + + + + diff --git a/old/SkillScores_map_v5.R b/old/SkillScores_map_v5.R new file mode 100644 index 0000000000000000000000000000000000000000..4f18313a055966b660ffba2c7709b49a372ee190 --- /dev/null +++ b/old/SkillScores_map_v5.R @@ -0,0 +1,471 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps + +# Choose one region between those defined below for the reliability diagrams and for the summary tables (0=World, 1=Northern europe, etc.) +area.num <- 0 +land.only <- FALSE # set it to true if you want to average only over land, FALSE otherwise [warning: the reliability diagram is computed ONLY over land+sea ] + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# Define regions for averaging skill scores: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 and 360, and +# lat values MUST be from 90 to -90 degrees) +area0.name <- "World" +area0 <- c(-180,180,-90,90) +area1.name <- "Northern_Europe" +area1 <- c(-15,45,45,75) +area2.name <- "Southern_Europe" +area2 <- c(-15,45,35,45) +area3.name <- "Southwestern_Europe" +area3 <- c(-15,20,35,45) +area4.name <- "Southeastern_Europe" +area4 <- c(20,45,35,45) +area5.name <- "Europe" +area5 <- c(-15,45,35,75) +area6.name <- "North_America" +area6 <- c(-130,-60,30,50) +area7.name <- "North_Sea" +area7 <- c(-4, 15, 50, 65) +area8.name <- "Iberian_Peninsula" +area8 <- c(-10, 4, 36, 44) +area9.name <- "Canadian region" +area9 <- c(-114, -111.8, 49.6, 51.7) + +######################################################################################### +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +# replace old command area.name <- area0.name: +area <- eval(parse(text=paste0("area", area.num))) +area.name <- eval(parse(text=paste0("area", area.num,".name"))) + +# position of max and min long and lat values the chosen region: +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## +# choose one or more skills scores to visualize between ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr') #c('FairRpss','FairCrpss','EnsCorr','RelDiagr') + +my.score.name.title <- c('FairRpss','FairCrpss','Correlation of the ensemble mean','Reliability Diagram') # names to be visualized in the graph titles + +for(month in my.months){ + #month=1 # for the debug + print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + sdates.seq <- weekly.seq(forecast.year,mes,day) # sequence of dates corresponding to all the thursday of the year + my.startdates.days <- as.integer(substr(sdates.seq[which(as.integer(substr(sdates.seq,5,6)) == month)],7,8)) #startdates.monthly[[month]] #c(1:5) + stringa <- paste0(my.startdates.days,collapse=",") + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one + if(lon.min > 360) area.lon <- (which(lons >= 
lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one + if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + + for(my.score.name.map in my.score.name){ + #my.score.name.map='Acc' # for the debug + + print(paste0("Score=",my.score.name.map)) + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + #if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss <- c(seq(-1,1,by=0.1)) + col.rpss <- colorRampPalette(col)(length(brk.rpss)-1) + + # at present all breaks and colors are the same, so there is no need to differenciate between indexes, aside the two Fair scores whose range is [-inf,0] + my.brk <- brk.rpss + my.col <- col.rpss + my.brk.labels <- my.brk + if(my.score.name.map == 'FairRpss' | my.score.name.map=='FairCrpss') my.brk.labels <- c(expression(-infinity), my.brk[-1]) + + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + print(paste0("Leadtime=",lead)) + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map != "RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + #par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + png(file=my.file,width=1000,height=600) + + # Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05 + + # Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + # Map title: + par(fig=c(0,1,0.82,0.91), new=TRUE) + #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / ", my.score.name.title[which(my.score.name == my.score.name.map)], "\n", my.month[month], " (",leadtime.week[lead]," days lead time)/ 1994-2013") + mtext(my.title, cex=2.4, font=2) + + # Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + #par(fig=c(0.52,0.82,0,0.9), new=TRUE) + + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk.labels) + + ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nBias correction: none\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + } # close if on !RelDiagr + + gc() + + if(my.score.name.map == "RelDiagr") { + + my.file <- paste0(work.dir,'/', var.name,'/', var.name,'_RelDiagr_', area.name,'_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + my.file2 <- paste0(work.dir,'/', var.name,'/formatted/', var.name,'_RelDiagr_', area.name, '_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + + png(file=my.file,width=600,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / Reliability Diagram \n", my.month[month], " (", leadtime.week[lead]," days lead time) / 1994-2013") + + # Note that bins=5 should correspond to the number of hindcast members + 1 + my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + + ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=50, plot=TRUE, plot.refin=TRUE) + + ReliabilityDiagram(ens.chunk.prob[[lead]][int1,10,20:21], obs.chunk.prob[[lead]][int1,10,20:21], bins=5, nboot=50, plot=TRUE, plot.refin=TRUE) + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title, cex.main=1.6) + + lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") + + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + + lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nRegion: ", area.name,"\nReference dataset: ERA-Interim \nBias correction: none' ", my.file," ", my.file2)) + } + + gc() + } # next lead + + gc() + } # next score + + gc() +} # next month + + +############################################################################# +# Visualize summary tables for a region # +############################################################################# + +# If you use a 
mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: +if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + #Coordinates repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 0, 1) # assign 1 to land and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) # assign 1 to land and NA to sea to multiply it for the score values later +} + + +# plot a box with the chosen area (do it only once): +# +#png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) +#PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) +#dev.off() + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load one file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one +if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + +if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one +if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + +# Load all data and average it over the chosen region: +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + # remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + +my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'.png') + +png(file=my.file,width=700,height=500) + +#my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +#my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') +my.cols <- c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + +#my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) +#my.seq <- 
c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) +my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() + +par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,4,0), fig=c(0.07, 0.27, 0.80, 1), new=TRUE) +mtext("ACC", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.32, 0.57, 0.8, 1), new=TRUE) +mtext("FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.62, 0.87, 0.8, 1), new=TRUE) +mtext("FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 
0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) +gc() + +dev.off() + +# Not checked but should work: +#system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2)) + + + +# Monthly reliability diagrams, all on the same graph # +# ONLY FOR SEA+LAND!!!! + +my.RelDiagr<-list() + +my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','Summary_RelDiagr_', area.name,'.png') + +#n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees of longitude + +png(file=my.file, width=1000, height=700) +plot(0, type = "n", axes = FALSE, ann = FALSE) +mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2) +par(mar=c(0,0,0,0) , fig=c(0, 1, 0, 0.4), new=TRUE) +legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4) + +par(mar=c(4.8,3.8,3.8,3.8) , fig=c(0, 1, 0, 0.4), new=TRUE) + +for(tercile in c(1,3)){ + print(paste0("Tercile: ",tercile)) + + my.lead <- (1:n.leadtimes) + (tercile-1) * 4 + + for(lead2 in my.lead){ + print(paste0("Lead time: ",lead2)) + + xmin <- 0.25 * ((lead2-1) %% 4) + xmax <- xmin + 0.25 + ymin <- 0.10 + 0.38 * (tercile-1)/2 + ymax <- ymin + 0.38 + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + #my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013") + my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days") + + par(fig=c(xmin, xmax, ymin, ymax), new=TRUE) + mod.subtitle <- ifelse(tercile == 1, "lower","upper") + plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4) + + + for(month in c(1,4,7,10)){ #my.months){ + #month=1 # for the debug + #print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... 
\n')) + + lead <- 1 + (lead2-1) %% 4 + + if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + #col.month <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99') + #col.month <- c('#8dd3c7','#8dd3c7','#8dd3c7','#ffffb3','#ffffb3','#ffffb3','#bebada','#bebada','#bebada','#fb8072','#fb8072','#fb8072') + col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3') + + if(tercile == 1) lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + if(tercile == 3) lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + rm(obs.chunk.prob, ens.chunk.prob) + gc() + } # next month + + } # next lead +} # next tercile + + +dev.off() + + + + + diff --git a/old/SkillScores_map_v5.R~ b/old/SkillScores_map_v5.R~ new file mode 100644 index 0000000000000000000000000000000000000000..932fa43c83425ca50d63876fb9e8f5aeabe0a03d --- /dev/null +++ b/old/SkillScores_map_v5.R~ @@ -0,0 +1,467 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +# Define regions for averaging skill scores: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 and 360, and +# lat values MUST be from 90 to -90 degrees) +area0.name <- "World" +area0 <- c(-180,180,-90,90) +area1.name <- "Northern_Europe" +area1 <- c(-15,45,45,75) +area2.name <- "Southern_Europe" +area2 <- c(-15,45,35,45) +area3.name <- "Southwestern_Europe" +area3 <- c(-15,20,35,45) +area4.name <- "Southeastern_Europe" +area4 <- c(20,45,35,45) +area5.name <- "Europe" +area5 <- c(-15,45,35,75) +area6.name <- "North_America" +area6 <- c(-130,-60,30,50) +area7.name <- "North_Sea" +area7 <- c(-4, 15, 50, 65) +area8.name <- "Iberian_Peninsula" +area8 <- c(-10, 4, 36, 44) +area9.name <- "Canadian region" +area9 <- c(-114, -111.8, 49.6, 51.7) + +# Choose one region between those defined above for the reliability diagrams and for the summary tables (0=World, 1=Northern europe, etc.) +area.num <- 0 +land.only <- FALSE # set it to true if you want to average only over land, FALSE otherwise [warning: the reliability diagram is computed ONLY over land+sea ] + +######################################################################################### + +# replace old command area.name <- area0.name: +area <- eval(parse(text=paste0("area", area.num))) +area.name <- eval(parse(text=paste0("area", area.num,".name"))) + +# position of max and min long and lat values the chosen region: +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## +# choose one or more skills scores to visualize between ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr') #c('FairRpss','FairCrpss','EnsCorr','RelDiagr') + +my.score.name.title <- c('FairRpss','FairCrpss','Correlation of the ensemble mean','Reliability Diagram') # names to be visualized in the graph titles + +for(month in my.months){ + #month=1 # for the debug + print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + sdates.seq <- weekly.seq(forecast.year,mes,day) # sequence of dates corresponding to all the thursday of the year + my.startdates.days <- as.integer(substr(sdates.seq[which(as.integer(substr(sdates.seq,5,6)) == month)],7,8)) #startdates.monthly[[month]] #c(1:5) + stringa <- paste0(my.startdates.days,collapse=",") + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one + if(lon.min > 360) area.lon <- (which(lons >= 
lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one + if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + + for(my.score.name.map in my.score.name){ + #my.score.name.map='Acc' # for the debug + + print(paste0("Score=",my.score.name.map)) + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + #if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss <- c(seq(-1,1,by=0.1)) + col.rpss <- colorRampPalette(col)(length(brk.rpss)-1) + + # at present all breaks and colors are the same, so there is no need to differenciate between indexes, aside the two Fair scores whose range is [-inf,0] + my.brk <- brk.rpss + my.col <- col.rpss + my.brk.labels <- my.brk + if(my.score.name.map == 'FairRpss' | my.score.name.map=='FairCrpss') my.brk.labels <- c(expression(-infinity), my.brk[-1]) + + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + print(paste0("Leadtime=",lead)) + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map != "RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + #par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + png(file=my.file,width=1000,height=600) + + # Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05 + + # Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + # Map title: + par(fig=c(0,1,0.82,0.91), new=TRUE) + #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / ", my.score.name.title[which(my.score.name == my.score.name.map)], "\n", my.month[month], " (",leadtime.week[lead]," days lead time)/ 1994-2013") + mtext(my.title, cex=2.4, font=2) + + # Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + #par(fig=c(0.52,0.82,0,0.9), new=TRUE) + + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk.labels) + + ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nBias correction: none\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + } # close if on !RelDiagr + + gc() + + if(my.score.name.map == "RelDiagr") { + + my.file <- paste0(work.dir,'/', var.name,'/', var.name,'_RelDiagr_', area.name,'_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + my.file2 <- paste0(work.dir,'/', var.name,'/formatted/', var.name,'_RelDiagr_', area.name, '_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + + png(file=my.file,width=600,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / Reliability Diagram \n", my.month[month], " (", leadtime.week[lead]," days lead time) / 1994-2013") + + # Note that bins=5 should correspond to the number of hindcast members + 1 + my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title, cex.main=1.6) + + lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") + + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + + lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nRegion: ", area.name,"\nReference dataset: ERA-Interim \nBias correction: none' ", my.file," ", my.file2)) + } + + gc() + } # next lead + + gc() + } # next score + + gc() +} # next month + + +############################################################################# +# Visualize summary tables for a region # +############################################################################# + +# If you use a mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: +if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + #Coordinates 
repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 0, 1) # assign 1 to land and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) # assign 1 to land and NA to sea to multiply it for the score values later +} + + +# plot a box with the chosen area (do it only once): +# +#png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) +#PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) +#dev.off() + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load one file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one +if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + +if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one +if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + +# Load all data and average it over the chosen region: +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + # remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + +my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'.png') + +png(file=my.file,width=700,height=500) + +#my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +#my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') +my.cols <- c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + +#my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) +#my.seq <- c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) +my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() + +par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,4,0), fig=c(0.07, 0.27, 0.80, 
1), new=TRUE) +mtext("ACC", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.32, 0.57, 0.8, 1), new=TRUE) +mtext("FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,4,0), fig=c(0.62, 0.87, 0.8, 1), new=TRUE) +mtext("FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) +gc() + +dev.off() + +# Not checked but whould work: +#system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2)) + + + +# Monthly reliability diagrams, all over the same graph # +# ONLY FOR SEA+LAND!!!! + +my.RelDiagr<-list() + +my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/Summary_RelDiagr_', area.name,'.png') + +#n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + +png(file=my.file, width=1000, height=700) +plot(0, type = "n", axes = FALSE, ann = FALSE) +mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2) +par(mar=c(0,0,0,0) , fig=c(0, 1, 0, 0.4), new=TRUE) +legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4) + +par(mar=c(4.8,3.8,3.8,3.8) , fig=c(0, 1, 0, 0.4), new=TRUE) + +for(tercile in c(1,3)){ + print(paste0("Tercile: ",tercile)) + + my.lead <- (1:n.leadtimes) + (tercile-1) * 4 + + for(lead2 in my.lead){ + print(paste0("Lead time: ",lead2)) + + xmin <- 0.25 * ((lead2-1) %% 4) + xmax <- xmin + 0.25 + ymin <- 0.10 + 0.38 * (tercile-1)/2 + ymax <- ymin + 0.38 + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + #my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013") + my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days") + + par(fig=c(xmin, xmax, ymin, ymax), new=TRUE) + mod.subtitle <- ifelse(tercile == 1, "lower","upper") + plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4) + + + for(month in c(1,4,7,10)){ #my.months){ + #month=1 # for the debug + #print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... 
\n')) + + lead <- 1 + (lead2-1) %% 4 + + if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + #col.month <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99') + #col.month <- c('#8dd3c7','#8dd3c7','#8dd3c7','#ffffb3','#ffffb3','#ffffb3','#bebada','#bebada','#bebada','#fb8072','#fb8072','#fb8072') + col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3') + + if(tercile == 1) lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + if(tercile == 3) lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + rm(obs.chunk.prob, ens.chunk.prob) + gc() + } # next month + + } # next lead +} # next tercile + + +dev.off() + + + + + diff --git a/old/SkillScores_map_v6.R b/old/SkillScores_map_v6.R new file mode 100644 index 0000000000000000000000000000000000000000..6a57ee75b374f4b9397c65e916668e3615ae2189 --- /dev/null +++ b/old/SkillScores_map_v6.R @@ -0,0 +1,513 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as suffix in map filenames, e.g. 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps +n.intervals <- 12 # in case of computing the Reliability Diagram, set the number of intervals of the forecast frequency (X axis). It is convenient to set it to + # the number of ensemble members + 1 + +# Choose one region among those defined below for the reliability diagrams and for the summary tables (0=World, 1=Northern Europe, etc.) +area.num <- 8 +land.only <- FALSE # set it to TRUE if you want to average only over land, FALSE otherwise [warning: the reliability diagram is computed ONLY over land+sea ] + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la and lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# Define regions for averaging skill scores: (in the format: lon.min / lon.max / lat.min / lat.max. lon.min MUST be a NEGATIVE number, long values MUST be between 0 and 360, and +# lat values MUST be from 90 to -90 degrees) +area0.name <- "World" +area0 <- c(-180,180,-90,90) +area1.name <- "Northern_Europe" +area1 <- c(-15,45,45,75) +area2.name <- "Southern_Europe" +area2 <- c(-15,45,35,45) +area3.name <- "Southwestern_Europe" +area3 <- c(-15,20,35,45) +area4.name <- "Southeastern_Europe" +area4 <- c(20,45,35,45) +area5.name <- "Europe" +area5 <- c(-15,45,35,75) +area6.name <- "North_America" +area6 <- c(-130,-60,30,50) +area7.name <- "North_Sea" +area7 <- c(-4, 15, 50, 65) +area8.name <- "Iberian_Peninsula" +area8 <- c(-10, 4, 36, 44) +area9.name <- "Canadian_region" +area9 <- c(-114, -111.8, 49.6, 51.7) + +######################################################################################### + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +# replace old command area.name <- area0.name: +area <- eval(parse(text=paste0("area", area.num))) +area.name <- eval(parse(text=paste0("area", area.num,".name"))) + +# position of max and min long and lat values of the chosen region: +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +dir.create(path=paste0(work.dir,"/",var.name), showWarnings=FALSE) # if directory already exists, this function doesn't create it again +dir.create(path=paste0(work.dir,"/",var.name,"/formatted"), showWarnings=FALSE) # if directory already exists, this function doesn't create it again + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## +# choose one or more skill scores to visualize among ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') +my.score.name.title <- c('FairRpss','FairCrpss','Correlation of the ensemble mean','Reliability Diagram') # names to be visualized in the graph titles + +for(month in my.months){ + #month=1 # for the debug + print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + sdates.seq <- weekly.seq(forecast.year,mes,day) # sequence of dates 
corresponding to all the Thursdays of the year + my.startdates.days <- as.integer(substr(sdates.seq[which(as.integer(substr(sdates.seq,5,6)) == month)],7,8)) #startdates.monthly[[month]] #c(1:5) + stringa <- paste0(my.startdates.days,collapse=",") + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one + if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one + if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees of longitude + + for(my.score.name.map in my.score.name){ + #my.score.name.map='Acc' # for the debug + + print(paste0("Score=",my.score.name.map)) + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + #if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss <- c(seq(-1,1,by=0.1)) + col.rpss <- colorRampPalette(col)(length(brk.rpss)-1) + + # at present all breaks and colors are the same, so there is no need to differentiate between indices, aside from the two Fair scores, whose range is (-inf,1] + my.brk <- brk.rpss + my.col <- col.rpss + my.brk.labels <- my.brk + if(my.score.name.map == 'FairRpss' | my.score.name.map=='FairCrpss') my.brk.labels <- c(expression(-infinity), my.brk[-1]) + + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + print(paste0("Leadtime=",lead)) + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map != "RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + #par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + png(file=my.file,width=1000,height=600) + + # Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05 + + # Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + # Map title: + par(fig=c(0,1,0.82,0.91), new=TRUE) + #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / ", my.score.name.title[which(my.score.name == my.score.name.map)], "\n", my.month[month], " (",leadtime.week[lead]," days lead time)/ 1994-2013") + mtext(my.title, cex=2.4, font=2) + + # Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + #par(fig=c(0.52,0.82,0,0.9), new=TRUE) + + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk.labels) + + ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nBias correction: none\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + + } # close if on !RelDiagr + + gc() + + if(my.score.name.map == "RelDiagr") { + + # Note that bins should correspond to the number of hindcast members + 1, for systems with a low number of members (below ~20) + # Warning: if the bootstrap shows a gray point instead of a gray bar, decrease the number of bins! + my.RelDiagr[[lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Below Normal tercile + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Above Normal tercile + + + my.file <- paste0(work.dir,'/', var.name,'/', var.name,'_RelDiagr_', area.name,'_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + my.file2 <- paste0(work.dir,'/', var.name,'/formatted/', var.name,'_RelDiagr_', area.name, '_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + + png(file=my.file,width=650,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. 
Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / Reliability Diagram \n", my.month[month], " (", leadtime.week[lead]," days lead time) / 1994-2013") + + par(fig=c(0, 0.83, 0, 1), new=TRUE) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast probability (%)",ylab="Observed frequency (%)", xaxt='n', yaxt='n',col='gray30', main=my.title, cex.main=1.6) + + middle <- 100*round(my.RelDiagr[[lead]]$p.avgs,2) + interval <- (middle/2)[2] + left <- round(middle - interval,0) + left[1] <- 0 + right <- round(middle + interval,0) + right[length(right)] <- 100 # close the last bin at 100% (length() here; the original l() was undefined) + bins.label <- paste0(paste0(left,"-"),right) + + axis(1, at=my.RelDiagr[[lead]]$p.avgs, labels=FALSE) + text(x=my.RelDiagr[[lead]]$p.avgs, par("usr")[3]-0.03, labels=bins.label, srt=45, pos=1, xpd=TRUE) + axis(2, at=(0:10)/10, labels=10*(0:10), cex.axis=1) + + no_res <- 1/3 #sum(my.RelDiagr[[lead]]$obs.counts)/sum(my.RelDiagr[[lead]]$hist.counts) + numb <- c(seq(0,1,by=0.1)) + no_skill <- (numb+no_res)/2 + lines(c(0,1), c(no_res,no_res), col="gray", lty=3) + lines(c(1/3,1/3), c(0,1), col="gray", lty=3) + lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3) + + #lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") + for (i in 1:n.intervals){ + lines(rep(my.RelDiagr[[lead]]$p.avgs[i], 2), c(my.RelDiagr[[lead]]$cbar.lo[i],my.RelDiagr[[lead]]$cbar.hi[i]), col="pink", lwd=2) + lines(0.005+rep(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[i], 2), c(my.RelDiagr[[2*n.leadtimes+lead]]$cbar.lo[i],my.RelDiagr[[2*n.leadtimes+lead]]$cbar.hi[i]), col="lightblue", lwd=2) + } + + + points(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="b", pch=1, col="red", cex=1.5, lwd=5) + + #points(my.RelDiagr[[n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[n.leadtimes+lead]]$p.avgs)],my.RelDiagr[[n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[n.leadtimes+lead]]$cond.prob)],type="b", pch=11, col="gold", cex=1.5, lwd=5) # col="darkgreen" + + points(0.005+my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="b", pch=1, col="blue", cex=1.5, lwd=5) + + legend("bottomright", legend=c('Above Normal','Below Normal'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + par(fig=c(0.75, 1, 0, 0.35), new=TRUE) + perc1 <- 100*my.RelDiagr[[lead]]$hist.counts/sum(my.RelDiagr[[lead]]$hist.counts) + barplot(perc1, beside=T,space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "red", ylim=c(0,max(perc1)+5), main="Below Normal") + par(fig=c(0.75, 1, 0.34, 0.36), new=TRUE) + mtext("% of forecasts for bin") + + par(fig=c(0.75, 1, 0.25, 0.6), new=TRUE) + perc2 <- 100*my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts/sum(my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts) + barplot(perc2, beside=T, space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "blue", ylim=c(0,max(perc2)+5), main="Above Normal") + par(fig=c(0.75, 1, 0.59, 0.61), new=TRUE) + mtext("% of forecasts for bin") + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nRegion: ", area.name,"\nReference dataset: ERA-Interim \nBias correction: none' ", my.file," ", my.file2)) + } + + gc() + } # next lead + + gc() + } # next score + + gc() +} # next month 
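+
+# Editor's sketch (not part of the original script): the summary tables below assign a color to each
+# (start date, lead time) cell through seven cascaded "score < my.seq[k]" assignments, repeated once
+# per skill score. Assuming my.seq is sorted in ascending order, the same binning can be done in one
+# vectorized call with findInterval(); score.to.col() is a hypothetical helper, shown for comparison:
+score.to.col <- function(x, brks = my.seq, cols = my.cols) {
+  idx <- findInterval(x, brks[2:(length(brks)-1)]) + 1 # bin index in 1..length(cols); values on a break fall in the upper bin, as in the cascade
+  idx[is.na(idx)] <- length(cols)                      # all-NA cells keep the last color, mirroring the cascade's initial fill
+  array(cols[idx], dim = dim(x))
+}
+#array.colors <- score.to.col(array.EnsCorr)           # would replace one seven-line cascade below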
+ + +############################################################################# +# Visualize summary tables for a region # +############################################################################# + +# If you use a mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: +if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + #Coordinates repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 0, 1) # assign 1 to land and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) # assign 1 to land and NA to sea to multiply it for the score values later +} + + +# plot a box with the chosen area (do it only once): +# +#png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) +#PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) +#dev.off() + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load one file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one +if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + +if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one +if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + +# Load all data and average it over the chosen region: +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + # remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + +my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') + +png(file=my.file,width=700,height=500) + +#my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +#my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- 
c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') +my.cols <- c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + +#my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) +#my.seq <- c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) +my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() + +#par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +#mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,2,0), fig=c(0.07, 0.27, 0.9, 1), new=TRUE) +mtext("(a) EnsCorr", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,2,0), fig=c(0.32, 0.57, 0.9, 1), new=TRUE) +mtext("(b) FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,2,0), fig=c(0.62, 0.87, 0.9, 1), new=TRUE) +mtext("(c) FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- 
my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) +gc() + +dev.off() + +# Not checked but should work: +#system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2)) + + + +# Monthly reliability diagrams, all on the same graph # +# ONLY FOR SEA+LAND!!!! + +my.RelDiagr<-list() + +my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','Summary_RelDiagr_', area.name,'.png') + +#n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees of longitude + +png(file=my.file, width=1000, height=700) +plot(0, type = "n", axes = FALSE, ann = FALSE) +mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2) +par(mar=c(0,0,0,0) , fig=c(0, 1, 0, 0.4), new=TRUE) +legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4) + +par(mar=c(4.8,3.8,3.8,3.8) , fig=c(0, 1, 0, 0.4), new=TRUE) + +for(tercile in c(1,3)){ + print(paste0("Tercile: ",tercile)) + + my.lead <- (1:n.leadtimes) + (tercile-1) * 4 + + for(lead2 in my.lead){ + print(paste0("Lead time: ",lead2)) + + xmin <- 0.25 * ((lead2-1) %% 4) + xmax <- xmin + 0.25 + ymin <- 0.10 + 0.38 * (tercile-1)/2 + ymax <- ymin + 0.38 + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + #my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013") + my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days") + + par(fig=c(xmin, xmax, ymin, ymax), new=TRUE) + mod.subtitle <- ifelse(tercile == 1, "lower","upper") + plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4) + + + for(month in c(1,4,7,10)){ #my.months){ + #month=1 # for the debug + #print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... 
\n')) + + lead <- 1 + (lead2-1) %% 4 + + if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + #col.month <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99') + #col.month <- c('#8dd3c7','#8dd3c7','#8dd3c7','#ffffb3','#ffffb3','#ffffb3','#bebada','#bebada','#bebada','#fb8072','#fb8072','#fb8072') + col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3') + + if(tercile == 1) lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + if(tercile == 3) lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + rm(obs.chunk.prob, ens.chunk.prob) + gc() + } # next month + + } # next lead +} # next tercile + + +dev.off() + + + + + diff --git a/old/SkillScores_map_v6.R~ b/old/SkillScores_map_v6.R~ new file mode 100644 index 0000000000000000000000000000000000000000..e2e8829963533205bc35a03b4efe107e8509b023 --- /dev/null +++ b/old/SkillScores_map_v6.R~ @@ -0,0 +1,508 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as suffix in map filenames, e.g. 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps +n.intervals <- 12 # in case of computing the Reliability Diagram, set the number of intervals of the forecasted frequency (X axis). It is convenient to set it to + # the number of ensemble members + 1
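+
+# [Hedged aside, added for illustration] the "members + 1" rule of thumb follows
+# from the fact that an M-member ensemble can only issue the tercile probabilities
+# 0/M, 1/M, ..., M/M, i.e. M+1 distinct values; a toy check (M=11 is assumed):
+# M <- 11; fcst <- array(rnorm(500*M), c(500,M))
+# prob.above <- rowMeans(fcst > qnorm(2/3))  # forecast probability of the upper tercile
+# length(unique(prob.above)) <= M + 1        # TRUE: at most M+1 attainable values
+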
+# Choose one region from among those defined below for the reliability diagrams and for the summary tables (0=World, 1=Northern Europe, etc.) +area.num <- 8 +land.only <- FALSE # set it to TRUE if you want to average only over land, FALSE otherwise [warning: the reliability diagram is computed ONLY over land+sea ] + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# Define regions for averaging skill scores: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 to 360, and + # lat values MUST be from 90 to -90 degrees) +area0.name <- "World" +area0 <- c(-180,180,-90,90) +area1.name <- "Northern_Europe" +area1 <- c(-15,45,45,75) +area2.name <- "Southern_Europe" +area2 <- c(-15,45,35,45) +area3.name <- "Southwestern_Europe" +area3 <- c(-15,20,35,45) +area4.name <- "Southeastern_Europe" +area4 <- c(20,45,35,45) +area5.name <- "Europe" +area5 <- c(-15,45,35,75) +area6.name <- "North_America" +area6 <- c(-130,-60,30,50) +area7.name <- "North_Sea" +area7 <- c(-4, 15, 50, 65) +area8.name <- "Iberian_Peninsula" +area8 <- c(-10, 4, 36, 44) +area9.name <- "Canadian region" +area9 <- c(-114, -111.8, 49.6, 51.7) + +######################################################################################### + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +# replace old command area.name <- area0.name: +area <- eval(parse(text=paste0("area", area.num))) +area.name <- eval(parse(text=paste0("area", area.num,".name"))) + +# position of max and min long and lat values of the chosen region: +lon.min <- 360 + area[1] +lon.max <- area[2] +lat.min <- area[3] +lat.max <- area[4] + +dir.create(path=paste0(work.dir,"/",var.name), showWarnings=FALSE) # if directory already exists, this function doesn't create it again +dir.create(path=paste0(work.dir,"/",var.name,"/formatted"), showWarnings=FALSE) # if directory already exists, this function doesn't create it again + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## +# choose one or more skill scores to visualize among ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') +my.score.name.title <- c('FairRpss','FairCrpss','Correlation of the ensemble mean','Reliability Diagram') # names to be visualized in the graph titles + +for(month in my.months){ + #month=1 # for the debug + print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + sdates.seq <- weekly.seq(forecast.year,mes,day) # sequence of dates corresponding to all the Thursdays of the year + my.startdates.days <- as.integer(substr(sdates.seq[which(as.integer(substr(sdates.seq,5,6)) == month)],7,8)) #startdates.monthly[[month]] #c(1:5) + stringa <- paste0(my.startdates.days,collapse=",") + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to the chosen one + if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to the chosen one + if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude + + for(my.score.name.map in my.score.name){ + #my.score.name.map='Acc' # for the debug + + print(paste0("Score=",my.score.name.map)) + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + #if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss <- c(seq(-1,1,by=0.1)) + col.rpss <- colorRampPalette(col)(length(brk.rpss)-1) + + # at present all breaks and colors are the same, so there is no need to differentiate between indexes, aside from the two Fair scores, whose range is (-inf,1] + my.brk <- brk.rpss + my.col <- col.rpss + my.brk.labels <- my.brk + if(my.score.name.map == 'FairRpss' | my.score.name.map=='FairCrpss') my.brk.labels <- c(expression(-infinity), my.brk[-1]) + + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + print(paste0("Leadtime=",lead)) + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map != "RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + #layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + #par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + #n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + #PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + png(file=my.file,width=1000,height=600) + + # Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05 + + # Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + # Map title: + par(fig=c(0,1,0.82,0.91), new=TRUE) + #my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / ", my.score.name.title[which(my.score.name == my.score.name.map)], "\n", my.month[month], " (",leadtime.week[lead]," days lead time)/ 1994-2013") + mtext(my.title, cex=2.4, font=2) + + # Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + #par(fig=c(0.52,0.82,0,0.9), new=TRUE) + + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk.labels) + + ### arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ### my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nBias correction: none\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + + } # close if on !RelDiagr + + gc() + + if(my.score.name.map == "RelDiagr") { + + # Note that bins should correspond to the number of hindcast members + 1, for systems with a low number of members (below ~20) + # Warning: if the bootstrap shows a gray point instead of a gray bar, decrease the number of bins! + my.RelDiagr[[lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Below Normal tercile + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Above Normal tercile + + + my.file <- paste0(work.dir,'/', var.name,'/', var.name,'_RelDiagr_', area.name,'_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + my.file2 <- paste0(work.dir,'/', var.name,'/formatted/', var.name,'_RelDiagr_', area.name, '_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + + png(file=my.file,width=650,height=600) + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / Reliability Diagram \n", my.month[month], " (", leadtime.week[lead]," days lead time) / 1994-2013") + + par(fig=c(0, 0.83, 0, 1), new=TRUE) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast probability (%)",ylab="Observed frequency (%)", xaxt='n', yaxt='n',col='gray30', main=my.title, cex.main=1.6) + + middle <- 100*round(my.RelDiagr[[lead]]$p.avgs,2) + interval <- (middle/2)[2] + left <- round(middle - interval,0) + left[1] <- 0 + right <- round(middle + interval,0) + right[length(right)] <- 100 + bins.label <- paste0(paste0(left,"-"),right)
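+
+ # [Hedged worked example, values assumed] with 5 bin centres p.avgs = c(0.1,0.3,0.5,0.7,0.9):
+ # middle = 10 30 50 70 90, interval = 15, left = 0 15 35 55 75, right = 25 45 65 85 100,
+ # so bins.label = "0-25" "15-45" "35-65" "55-85" "75-100" (the intervals overlap because
+ # interval is middle[2]/2 rather than half the spacing between bin centres)
+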
+axis(1, at=my.RelDiagr[[lead]]$p.avgs, labels=FALSE) +text(x=my.RelDiagr[[lead]]$p.avgs, par("usr")[3]-0.03, labels=bins.label, srt=45, pos=1, xpd=TRUE) +axis(2, at=(0:10)/10, labels=10*(0:10), cex.axis=1) + +no_res <- 1/3 #sum(my.RelDiagr[[lead]]$obs.counts)/sum(my.RelDiagr[[lead]]$hist.counts) +numb <- c(seq(0,1,by=0.1)) +no_skill <- (numb+no_res)/2 +lines(c(0,1), c(no_res,no_res), col="gray", lty=3) +lines(c(1/3,1/3), c(0,1), col="gray", lty=3) +lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3) + +#lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") +for (i in 1:n.intervals){ +lines(rep(my.RelDiagr[[lead]]$p.avgs[i], 2), c(my.RelDiagr[[lead]]$cbar.lo[i],my.RelDiagr[[lead]]$cbar.hi[i]), col="pink", lwd=2) +lines(0.005+rep(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[i], 2), c(my.RelDiagr[[2*n.leadtimes+lead]]$cbar.lo[i],my.RelDiagr[[2*n.leadtimes+lead]]$cbar.hi[i]), col="lightblue", lwd=2) +} + + +points(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="b", pch=1, col="red", cex=1.5, lwd=5) + +#points(my.RelDiagr[[n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[n.leadtimes+lead]]$p.avgs)],my.RelDiagr[[n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[n.leadtimes+lead]]$cond.prob)],type="b", pch=11, col="gold", cex=1.5, lwd=5) # col="darkgreen" + +points(0.005+my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="b", pch=1, col="blue", cex=1.5, lwd=5) + +legend("bottomright", legend=c('Upper Tercile','Lower Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + +par(fig=c(0.75, 1, 0, 0.35), new=TRUE) +barplot(100*my.RelDiagr[[lead]]$hist.counts/sum(my.RelDiagr[[lead]]$hist.counts), beside=T,space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "red", ylim=c(0,25), main="% of forecasts") + +par(fig=c(0.75, 1, 0.35, 0.7), new=TRUE) +barplot(100*my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts/sum(my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts), beside=T, space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "blue", ylim=c(0,25), main="% of forecasts") + +dev.off() + +system(paste0("~/scripts/fig2catalog.sh -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nRegion: ", area.name,"\nReference dataset: ERA-Interim \nBias correction: none' ", my.file," ", my.file2)) +} + +gc() +} # next lead + +gc() +} # next score + +gc() +} # next month + + +############################################################################# +# Visualize summary tables for a region #
+############################################################################# + +# If you use a mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: +if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + #Coordinates repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 0, 1) # assign 1 to land and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) # assign 1 to land and NA to sea to multiply it for the score values later +} + + +# plot a box with the chosen area (do it only once): +# +#png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) +#PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) +#dev.off() + +array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + +# Load one file, just to take the lat and lon values: +load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + +if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one +if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + +if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one +if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + +# Load all data and average it over the chosen region: +for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + # remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } +} + + +if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + +my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') + +png(file=my.file,width=700,height=500) + +#my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) +#my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') +#my.cols <- c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') +my.cols <- 
c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + +#my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) +#my.seq <- c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) +my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + +# create an array similar to array.pers but with colors instead of frequencies: +plot.new() + +#par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) +#mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + +par(mar=c(0,0,2,0), fig=c(0.07, 0.27, 0.9, 1), new=TRUE) +mtext("(a) EnsCorr", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] +array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] +array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] +array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] +array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] +array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] +array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,2,0), fig=c(0.32, 0.57, 0.9, 1), new=TRUE) +mtext("(b) FairRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] +array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(mar=c(0,0,2,0), fig=c(0.62, 0.87, 0.9, 1), new=TRUE) +mtext("(c) FairCRPSS", cex=1.5) + +par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) +plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) +mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) +mtext(side = 2, text = "Start date", line = 3, cex=1.2) +axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) +axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + +array.colors <- array(my.cols[8],c(12,4)) +array.colors[array.FairCrpss < my.seq[8]] <- my.cols[7] +array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] +array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] +array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] +array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] 
+array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] +array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + +for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + +par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) +#ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) +ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) +gc() + +dev.off() + +# Not checked, but it should work: +#system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2)) + + + +# Monthly reliability diagrams, all over the same graph # +# ONLY FOR SEA+LAND!!!! + +my.RelDiagr<-list() + +my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png') +my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/Summary_RelDiagr_', area.name,'.png') + +#n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude + +png(file=my.file, width=1000, height=700) +plot(0, type = "n", axes = FALSE, ann = FALSE) +mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2) +par(mar=c(0,0,0,0) , fig=c(0, 1, 0, 0.4), new=TRUE) +legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4) + +par(mar=c(4.8,3.8,3.8,3.8) , fig=c(0, 1, 0, 0.4), new=TRUE) + +for(tercile in c(1,3)){ + print(paste0("Tercile: ",tercile)) + + my.lead <- (1:n.leadtimes) + (tercile-1) * 4 + + for(lead2 in my.lead){ + print(paste0("Lead time: ",lead2)) + + xmin <- 0.25 * ((lead2-1) %% 4) + xmax <- xmin + 0.25 + ymin <- 0.10 + 0.38 * (tercile-1)/2 + ymax <- ymin + 0.38 + + #my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + #my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013") + my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days") + + par(fig=c(xmin, xmax, ymin, ymax), new=TRUE) + mod.subtitle <- ifelse(tercile == 1, "lower","upper") + plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4) + + + for(month in c(1,4,7,10)){ #my.months){ + #month=1 # for the debug + #print(paste0("Month=",month)) + + # Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... 
\n')) + + lead <- 1 + (lead2-1) %% 4 + + if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + #my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + #col.month <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99') + #col.month <- c('#8dd3c7','#8dd3c7','#8dd3c7','#ffffb3','#ffffb3','#ffffb3','#bebada','#bebada','#bebada','#fb8072','#fb8072','#fb8072') + col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3') + + if(tercile == 1) lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + if(tercile == 3) lines(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col=col.month[month]) + + rm(obs.chunk.prob, ens.chunk.prob) + gc() + } # next month + + } # next lead +} # next tercile + + +dev.off() + + + + + diff --git a/old/SkillScores_map_v7.R~ b/old/SkillScores_map_v7.R~ new file mode 100644 index 0000000000000000000000000000000000000000..0ccbe52f3dc471b2745df1152a9a7c91690ce7de --- /dev/null +++ b/old/SkillScores_map_v7.R~ @@ -0,0 +1,520 @@ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### +# run it only after computing and saving all the skill score data: + +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') +source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R') +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as suffix in map filenames, e.g. 'sfcWind' or 'psl' + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps +n.intervals <- 12 # in case of computing the Reliability Diagram, set the number of intervals of the forecasted frequency (X axis). It is convenient to set it to + # the number of ensemble members + 1 + +# Choose one or more regions from among those defined below for the reliability diagrams and for the summary tables (0=World, 1=Northern Europe, etc.) +areas <- 0:9 + +land.only <- FALSE # set it to TRUE if you want to average only over land, FALSE otherwise [warning: the reliability diagram is computed ONLY over land+sea ] + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +my.months=1:12 #9:12 # choose the month(s) to plot and save + +# Define regions for averaging skill scores: (in the format: lon.min /lon.max / lat.min /lat.max. lon.min MUST be a NEGATIVE number, long values MUST be from 0 to 360, and + # lat values MUST be from 90 to -90 degrees) +area0.name <- "World" +area0 <- c(-180,180,-90,90) +area1.name <- "Northern_Europe" +area1 <- c(-15,45,45,75) +area2.name <- "Southern_Europe" +area2 <- c(-15,45,35,45) +area3.name <- "Southwestern_Europe" +area3 <- c(-15,20,35,45) +area4.name <- "Southeastern_Europe" +area4 <- c(20,45,35,45) +area5.name <- "Europe" +area5 <- c(-15,45,35,75) +area6.name <- "North_America" +area6 <- c(-130,-60,30,50) +area7.name <- "North_Sea" +area7 <- c(-4, 15, 50, 65) +area8.name <- "Iberian_Peninsula" +area8 <- c(-10, 4, 36, 44) +area9.name <- "Canadian region" +area9 <- c(-114, -111.8, 49.6, 51.7) + +######################################################################################### + +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +dir.create(path=paste0(work.dir,"/",var.name), showWarnings=FALSE) # if directory already exists, this function doesn't create it again +dir.create(path=paste0(work.dir,"/",var.name,"/formatted"), showWarnings=FALSE) # if directory already exists, this function doesn't create it again + +########################################################################################## +# visualize worldwide monthly maps of skill scores # +########################################################################################## +# choose one or more skill scores to visualize among ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') +my.score.name.title <- c('FairRpss','FairCrpss','Correlation of the ensemble mean','Reliability Diagram') # names to be visualized in the graph titles + +for(area.num in areas){ + + ## replace old command area.name <- area0.name: + area <- eval(parse(text=paste0("area", area.num))) + area.name <- eval(parse(text=paste0("area", area.num,".name"))) + + ## position of max and min long and lat values of the chosen region: + lon.min <- 360 + area[1] + lon.max <- area[2] + lat.min <- area[3] + lat.max <- area[4]
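+
+  ## [Hedged aside, added] the eval(parse()) lookup above could also be written as a
+  ## named list, avoiding string parsing (sketch only; area.defs is not used below):
+  ## area.defs <- list(World = c(-180,180,-90,90), Europe = c(-15,45,35,75))
+  ## area <- area.defs[[area.num + 1]]; area.name <- names(area.defs)[area.num + 1]
+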
+  for(month in my.months){ + ## month=1 # for the debug + print(paste0("Month=",month)) + + ## Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + sdates.seq <- weekly.seq(forecast.year,mes,day) # sequence of dates corresponding to all the Thursdays of the year + my.startdates.days <- as.integer(substr(sdates.seq[which(as.integer(substr(sdates.seq,5,6)) == month)],7,8)) #startdates.monthly[[month]] #c(1:5) + stringa <- paste0(my.startdates.days,collapse=",") + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to the chosen one + if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + ##if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to the chosen one + ##if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + area.lat <- 1:length(lats) + area.lon <- 1:length(lons) + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude + + for(my.score.name.map in my.score.name){ + ##my.score.name.map='Acc' # for the debug + + print(paste0("Score=",my.score.name.map)) + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + ##if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + ## old green-red palette: + ## brk.rpss<-c(-1,seq(0,1,by=0.05)) + ## col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + ## brk.rps<-c(seq(0,1,by=0.1),10) + ## col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + ## if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + ## if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss <- c(seq(-1,1,by=0.1)) + col.rpss <- colorRampPalette(col)(length(brk.rpss)-1) + + ## at present all breaks and colors are the same, so there is no need to differentiate between indexes, aside from the two Fair scores, whose range is (-inf,1] + my.brk <- brk.rpss + my.col <- col.rpss + my.brk.labels <- my.brk + if(my.score.name.map == 'FairRpss' | my.score.name.map=='FairCrpss') my.brk.labels <- c(expression(-infinity), my.brk[-1]) + + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + print(paste0("Leadtime=",lead)) + + ##my.title<-paste0(my.score.name.map,' of ',cfs.name,' + ##10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map != "RelDiagr" && area.name == "World"){ + + ##postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + ## paper="special",width=12,height=7,horizontal=F) + + ##layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + ##par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + ##n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + ##hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + ##hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + ##n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + ##PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + ## lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + png(file=my.file,width=1000,height=600) + + ## Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05 + + ## Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + ## Map title: + par(fig=c(0,1,0.82,0.91), new=TRUE) + ##my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / ", my.score.name.title[which(my.score.name == my.score.name.map)], "\n", my.month[month], " (",leadtime.week[lead]," days lead time)/ 1994-2013") + mtext(my.title, cex=2.4, font=2) + + ## Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + ##par(fig=c(0.52,0.82,0,0.9), new=TRUE) + + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk.labels) + + ## arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ## my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nBias correction: none\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + + } # close if on !RelDiagr + + gc() + + if(my.score.name.map == "RelDiagr") { + + ## Note that bins should correspond to the number of hindcast members + 1, for systems with a low number of members (below ~20) + ## Warning: if the bootstrap shows a gray point instead of a gray bar, decrease the number of bins! + my.RelDiagr[[lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Below Normal tercile + + ##my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Above Normal tercile + + + my.file <- paste0(work.dir,'/', var.name,'/', var.name,'_RelDiagr_', area.name,'_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + my.file2 <- paste0(work.dir,'/', var.name,'/formatted/', var.name,'_RelDiagr_', area.name, '_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + + png(file=my.file,width=650,height=600) + + ##my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / Reliability Diagram \n", my.month[month], " (", leadtime.week[lead]," days lead time) / 1994-2013") + + par(fig=c(0, 0.83, 0, 1), new=TRUE) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast probability (%)",ylab="Observed frequency (%)", xaxt='n', yaxt='n',col='gray30', main=my.title, cex.main=1.6) + + middle <- 100*round(my.RelDiagr[[lead]]$p.avgs,2) + interval <- (middle/2)[2] + left <- round(middle - interval,0) + left[1] <- 0 + right <- round(middle + interval,0) + right[length(right)] <- 100 + bins.label <- paste0(paste0(left,"-"),right) + + axis(1, at=my.RelDiagr[[lead]]$p.avgs, labels=FALSE) + text(x=my.RelDiagr[[lead]]$p.avgs, par("usr")[3]-0.03, labels=bins.label, srt=45, pos=1, xpd=TRUE) + axis(2, at=(0:10)/10, labels=10*(0:10), cex.axis=1) + + no_res <- 1/3 #sum(my.RelDiagr[[lead]]$obs.counts)/sum(my.RelDiagr[[lead]]$hist.counts) + numb <- c(seq(0,1,by=0.1)) + no_skill <- (numb+no_res)/2 + lines(c(0,1), c(no_res,no_res), col="gray", lty=3) + lines(c(1/3,1/3), c(0,1), col="gray", lty=3) + lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3)
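+
+          ## [Hedged note, added] standard attributes-diagram geometry: the gray lines mark the
+          ## climatological (no-resolution) tercile frequency 1/3, and the black dotted line
+          ## y = (x + 1/3)/2 is the no-skill line halfway between the diagonal and climatology;
+          ## points between it and the diagonal still contribute positive Brier skill.
+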
+          ##lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") + for (i in 1:n.intervals){ + lines(rep(my.RelDiagr[[lead]]$p.avgs[i], 2), c(my.RelDiagr[[lead]]$cbar.lo[i],my.RelDiagr[[lead]]$cbar.hi[i]), col="pink", lwd=2) + lines(0.005+rep(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[i], 2), c(my.RelDiagr[[2*n.leadtimes+lead]]$cbar.lo[i],my.RelDiagr[[2*n.leadtimes+lead]]$cbar.hi[i]), col="lightblue", lwd=2) + } + + + points(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="b", pch=1, col="red", cex=1.5, lwd=5) + + ##points(my.RelDiagr[[n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[n.leadtimes+lead]]$p.avgs)],my.RelDiagr[[n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[n.leadtimes+lead]]$cond.prob)],type="b", pch=11, col="gold", cex=1.5, lwd=5) # col="darkgreen" + + points(0.005+my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="b", pch=1, col="blue", cex=1.5, lwd=5) + + legend("bottomright", legend=c('Above Normal','Below Normal'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + par(fig=c(0.75, 1, 0, 0.35), new=TRUE) + perc1 <- 100*my.RelDiagr[[lead]]$hist.counts/sum(my.RelDiagr[[lead]]$hist.counts) + barplot(perc1, beside=T,space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "red", ylim=c(0,max(perc1)+5), main="Below Normal") + par(fig=c(0.75, 1, 0.34, 0.36), new=TRUE) + mtext("% of forecasts for bin") + + par(fig=c(0.75, 1, 0.25, 0.6), new=TRUE) + perc2 <- 100*my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts/sum(my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts) + barplot(perc2, beside=T, space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "blue", ylim=c(0,max(perc2)+5), main="Above Normal") + par(fig=c(0.75, 1, 0.59, 0.61), new=TRUE) + mtext("% of forecasts for bin") + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nRegion: ", area.name,"\nReference dataset: ERA-Interim \nBias correction: none' ", my.file," ", my.file2)) + } + + gc() + } # next lead + + gc() + } # next score + + gc() + } # next 
month + + + ############################################################################# + # Visualize summary tables for a region # + ############################################################################# + + ## If you use a mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: + if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + ##Coordinates repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 0, 1) ## assign 1 to land and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) ## assign 1 to land and NA to sea to multiply it for the score values later + } + + ## plot a box with the chosen area (do it only once): + ## + ##png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) + ##PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) + ##dev.off() + + + array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + + ## Load one file, just to take the lat and lon values: + load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one + if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one + if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + ## Load all data and average it over the chosen region: + for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + ## remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + ##load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } + } + + + if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + + my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') + + png(file=my.file,width=700,height=500) + + ##my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + ##my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') + ##my.cols <- 
c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') + ##my.cols <- c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') + my.cols <- c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + + ##my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) + ##my.seq <- c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) + my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + + ## create an array similar to array.pers but with colors instead of frequencies: + plot.new() + + ##par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + ##mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + + par(mar=c(0,0,2,0), fig=c(0.07, 0.27, 0.9, 1), new=TRUE) + mtext("(a) EnsCorr", cex=1.5) + + par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) + mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) + mtext(side = 2, text = "Start date", line = 3, cex=1.2) + axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + + array.colors <- array(my.cols[8],c(12,4)) + array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] + array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] + array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] + array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] + array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] + array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] + array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + + for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + + par(mar=c(0,0,2,0), fig=c(0.32, 0.57, 0.9, 1), new=TRUE) + mtext("(b) FairRPSS", cex=1.5) + + par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) + mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) + mtext(side = 2, text = "Start date", line = 3, cex=1.2) + axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + + array.colors <- array(my.cols[8],c(12,4)) + array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] + array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] + array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] + array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] + array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] + array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] + array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + + for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + + par(mar=c(0,0,2,0), fig=c(0.62, 0.87, 0.9, 1), new=TRUE) + mtext("(c) FairCRPSS", cex=1.5) + + par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) + mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) + mtext(side = 2, text = "Start date", line = 3, cex=1.2) + axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + + array.colors <- array(my.cols[8],c(12,4)) + array.colors[array.FairCrpss < my.seq[8]] <- 
my.cols[7] + array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] + array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] + array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] + array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] + array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] + array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + + for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + + par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) + ##ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + gc() + + dev.off() + + ## Not checked, but it should work: + ##system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2)) + + + + + ## Monthly reliability diagrams, all over the same graph # + ## ONLY FOR SEA+LAND!!!! + + my.RelDiagr<-list() + + my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/Summary_RelDiagr_', area.name,'.png') + + ##n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitude + + png(file=my.file, width=1000, height=700) + plot(0, type = "n", axes = FALSE, ann = FALSE) + mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2) + par(mar=c(0,0,0,0) , fig=c(0, 1, 0, 0.4), new=TRUE) + legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4) + + par(mar=c(4.8,3.8,3.8,3.8) , fig=c(0, 1, 0, 0.4), new=TRUE) + + for(tercile in c(1,3)){ + print(paste0("Tercile: ",tercile)) + + my.lead <- (1:n.leadtimes) + (tercile-1) * 4 + + for(lead2 in my.lead){ + print(paste0("Lead time: ",lead2)) + + xmin <- 0.25 * ((lead2-1) %% 4) + xmax <- xmin + 0.25 + ymin <- 0.10 + 0.38 * (tercile-1)/2 + ymax <- ymin + 0.38 + + ##my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + ##my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013") + my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days") + + par(fig=c(xmin, xmax, ymin, ymax), new=TRUE) + mod.subtitle <- ifelse(tercile == 1, "lower","upper") + plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4) + + + for(month in c(1,4,7,10)){ #my.months){ + ##month=1 # for the debug + ##print(paste0("Month=",month)) + + ## Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... 
\n')) + + lead <- 1 + (lead2-1) %% 4 + + if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=n.intervals, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + ##my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=n.intervals, nboot=0, plot=FALSE, plot.refin=F) + + ##col.month <- c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99') + ##col.month <- c('#8dd3c7','#8dd3c7','#8dd3c7','#ffffb3','#ffffb3','#ffffb3','#bebada','#bebada','#bebada','#fb8072','#fb8072','#fb8072') + col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3') + + if(tercile == 1) points(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col=col.month[month], cex=.8, lwd=2) + + if(tercile == 3) points(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col=col.month[month], cex=.8, lwd=2) + + rm(obs.chunk.prob, ens.chunk.prob) + gc() + } # next month + + } # next lead + } # next tercile + + + dev.off() + + + +} # close for on area.num + diff --git a/old/SkillScores_misc.R b/old/SkillScores_misc.R new file mode 100644 index 0000000000000000000000000000000000000000..1672b1c4788c57413f8652212386b9408f1d6b2d --- /dev/null +++ b/old/SkillScores_misc.R @@ -0,0 +1,573 @@ + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + + for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + + anom.rean<-rnorm(N) + anom.hind<-array(rnorm(N*M),c(N,M)) + #quantile(anom.hind,prob=my.prob,type=8) + + obs<-convert2prob(anom.rean,prob<-my.prob) + #mean(obs[,1]) # check if it is equal to 0.333 for K=3 + + ens<-convert2prob(anom.hind,prob<-my.prob) + + # climatological forecast: + anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample of the hindcast, without replacement + ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob) + #mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4) + + # alternative definition of climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing): + #anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross validation + anom.rean.clim<-t(array(anom.rean,c(N,N))) # function used by convert2prob when ref is not specified; it is ~ equal to the ClimEns function above, it just has one extra column
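+# [Hedged sketch, added] contrast of the two climatological ensembles above, assuming
+# ClimEns() returns an N x (N-1) matrix whose i-th row is the sample without year i:
+# x <- rnorm(5)
+# clim.full <- t(array(x, c(5,5)))  # 5 x 5: every row repeats the whole sample
+# clim.loo <- ClimEns(x)            # 5 x 4: row i drops x[i] (leave-one-out)
+# all(sapply(1:5, function(i) setequal(clim.loo[i,], x[-i])))  # TRUE under the assumption above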
ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob.table based on the climatological ensemble of observations
+
+  p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+  y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+  ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+  ### computation of verification scores:
+
+  Rps<-round(mean(EnsRps(ens,obs)),4)
+  FairRps<-round(mean(FairRps(ens,obs)),4)
+  #Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+  #FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+  Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+  FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+  FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+  #FairRps.clim.stable<-0.44444444
+  #Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+  #FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+  Rpss<-round(1-(Rps/Rps.clim),4)
+  #Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+  Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs
+  system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast
+  #cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out equal to ~0.45 for every hindcast; it is different both from Rps.clim and from Rps.clim in the limit N -> +oo (0.555)
+
+  Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs
+  Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # using their internal climatological forecast
+
+  FairRpss<-round(1-(FairRps/FairRps.clim),4)
+  #FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+  #FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+  #FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast
+  #cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out equal to ~0.55; it is different both from FairRps.clim and from FairRps.clim in the limit N -> +oo (0.444)
+
+  cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+  #cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+  #cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : 
',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim)) + + } + + # Crpss: + + #Crps<-round(mean(EnsCrps(ens,obs)),4) + #FairCrps<-round(mean(FairCrps(ens,obs)),4) + + #cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4)) + #FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4) + + #Crpss<-round(1-(Crps/Crps.clim),4) + #Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) + + #FairCrpss<-round(1-(FairCrps/FairCrps.clim),4) + #FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) + +} + + +################################################################################## +# Other analysis # +################################################################################## + +my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together + +#tm<-toyarray(dims=c(32,64),N=20,nens=4) # in the number of startdates in case of forecasts or the number of years in case of hindcasts +#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20] +# +#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs) +#f.me <- veriApply('EnsMe', tm$fcst, tm$obs) +#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs) +#str(f.crps) # [32,64,20] + +#cst <- array(rnorm(prod(4:8)), 4:8) +#obs <- array(rnorm(prod(4:7)), 4:7) +#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs) +#dim(f.me) +#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm not only to swap dimensions, but also to merge two dimensions in the same one! +#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) 
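+# (hedged aside) concatenating the four 6-hourly series pools them into one long vector, so the
+# cor() below is computed on 4x the sample size, instead of first averaging them into daily
+# means as done above with "*.all"; a quick toy comparison of the two choices, base R only:
+x.toy <- rnorm(80); y.toy <- x.toy + rnorm(80)                       # 20 "years" x 4 time steps
+cor(x.toy, y.toy)                                                    # pooled correlation
+cor(rowMeans(matrix(x.toy, 20, 4)), rowMeans(matrix(y.toy, 20, 4)))  # correlation of the means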
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 startdates that must be computed individually because they are not a complete set of 4 + +for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years + data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + + # the intermediate startdate between startdate week i and startdate week i+4 are repeated: + if(startdate <= 49){ + clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week) + clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4 + clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdate 1 and 5 are necessary to calculate all startdates between 1 and 5 + clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e + clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f + clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g + } # 5 e f g h + + if(startdate > 1 && startdate <=49){ # fill the previous startdates: + clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,] + clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,] + } + +} + + + + +# You can make cor much faster by stripping away all the error checking code and calling the internal c function directly: +# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })}) +# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })}) +C_cor<-get("C_cor", asNamespace("stats")) +test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE) +my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=nom.rean.chunk[,i,j,k], na.method=2, FALSE) + + + +####################################################################################### +# Raster Animations # +####################################################################################### + +library(png) +library(animation) + + +clustPNG <- function(pngobj, nclust = 3){ + + j <- pngobj + # If there is an alpha component remove it... 
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
+
+#######################################################################################
+#                            Load large datasets with ff                              #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # list takes the object names as characters
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # terciles; the draft had c(1/3,1/3), but quantile breaks must be increasing
+close(anom.hind);rm(anom.hind)
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stand-in for an array that doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# (the signature of this draft wrapper was garbled; repaired into a runnable sketch and renamed
+# so it does not mask the veriApplyBig() sourced above, which it calls internally)
+veriApplyBig.fromdisk <- function(obs, my.scratch){
+  ffload(file=my.scratch)  # restores the ff object anom.hind saved above
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=obs, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stand-in for an array that doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
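+# (hedged sketch of the chunking idea behind the sourced veriApplyBig(): keep only one
+# longitude slice in RAM at a time and delegate the scoring to veriApply(); the function
+# name, the 5-d [member, year, leadtime, lat, lon] layout and the list-return handling are
+# assumptions for this illustration, not the API of the real veriApplyBig.R; needs abind)
+veriApplyBigSketch <- function(score, fcst, obs, prob=NULL, tdim=2, ensdim=1, chunk=32){
+  n.lon <- tail(dim(fcst), 1)                  # chunk over the last (longitude) dimension
+  out <- NULL
+  for(i0 in seq(1, n.lon, by=chunk)){
+    ii <- i0:min(i0 + chunk - 1, n.lon)        # indexing an ff array pulls the slice into RAM
+    res <- veriApply(score, fcst=fcst[,,,,ii,drop=FALSE], obs=obs[,,,ii,drop=FALSE],
+                     prob=prob, tdim=tdim, ensdim=ensdim)
+    if(is.list(res)) res <- res[[1]]           # e.g. FairRpss returns a list with $rpss first
+    out <- if(is.null(out)) res else abind::abind(out, res, along=length(dim(res)))
+  }
+  out
+}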
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind") +close(anom.hind) +#delete(anom.hind) +#rm(anom.hind) + +ffload(file="/scratch/Earth/ncortesi/anomhind") + +anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory + +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +fcst<-anom.hind +obs<-anom.rean + +close(anom.hind) +rm(anom.hind) + +#ffdrop(file="/scratch/Earth/ncortesi/anomhind") + +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE) +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE) + + +pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! (you must use do.call, see WT_drivers_v2.R) +ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + + + +######################################################################################### +# Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff # +######################################################################################### + +bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays in the disk instead than in the RAM) to remove the memory limit + +for(month in veri.month){ + month=1 # for debug + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name=my.month[month] # name given to the period of the selected startdates, i.e:"January" + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + print(paste0("Computing Skill Scores for ",startdate.name)) + + if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array + + if(boot) mod<-1 else mod=0 # in case of the boostrap, anom.hind.big includes also the reanalysis data as additional last member + + if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff")) + if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon)) + + for(startdate in my.startdates){ + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded +1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + if(!boot) { + anom.rean<-drop(anom.rean) + anom.rean.big[my.time.interv,,,] <- anom.rean + } + + if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + anom.hindcast<-drop(anom.hindcast) + + if(!boot) anom.hind.big[,my.time.interv,,,] <- 
anom.hindcast + if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1) + + rm(anom.hindcast, anom.rean) + } + gc() + } + + if(any(my.score.name=="FairRpss" || my.score.name=="FairCrpss")){ + if(!boot) { + if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + } else { + # bootstrapping: + my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory + + for(b in 1:n.boot){ + print(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! 
+ + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + +anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) +anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + +for(startdate in my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean<-drop(anom.rean) + #anom.rean.big[my.time.interv,,,] <- anom.rean + + my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)) + anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, + output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + #anom.hindcast<-drop(anom.hindcast) + + anom.hind.big[,my.time.interv,,,] <- anom.hindcast + rm(anom.hindcast) +} + +my.FairRpss <- veriApplyBig("FairRpss", 
+                          fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                        Alternative way to speed up Load()                             #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to find if it is WTs-driven
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+# Forecast system list:
+S4 <- list(path = ...)
+CFSv2 <- list(path = ...)
+
+# Reanalysis list:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = ...)
+
+# Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+exp.source <- NULL
+obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+# Select one variable and its frequency:
+var.name <- 'sfcWind'
+store.freq <- 'daily'
+
+# Extract only the directory path of the forecast and reanalysis:
+obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+obs.source4 <- strsplit(obs.source3,'/')
+obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+obs.source6 <- list.files(path = obs.source5)
+
+system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h')) # use the R variables store.freq and var.name defined above
+system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+y <- 1990
+var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+} # close if on mare
+
diff --git a/old/SkillScores_v1.R b/old/SkillScores_v1.R
new file mode 100644
index 0000000000000000000000000000000000000000..89a283934fac12a54c7dbbc8f5df228dc78509d9
--- /dev/null
+++ b/old/SkillScores_v1.R
@@ -0,0 +1,1129 @@
+
+slurm=TRUE # set it to TRUE only if you run the script with slurm
+
+#load libraries and functions:
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+library(jpeg)
+library(abind)
+library(ff)
+library(ffbase)
+source('/scratch/Earth/ncortesi/RESILIENCE/Rfunctions.R')
+
+#########################################################################################
+#                                   User's settings                                     #
+#########################################################################################
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE" # working dir where to put the output maps and files (in the /Data and /Maps subdirs)
+#workdir="/home/Earth/ncortesi/Downloads/RESILIENCE"
+
+file_path <- '/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+cfs.name='ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name='sfcWind' # forecast variable.
It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map='10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +yr1=2014 # starting year of the weekly sequence of the forecasts +mes=1 # starting forecast month (usually january) +day=2 # starting forecast day + +yr1.hind=1994 #1994 # first hindcast year +yr2.hind=2013 #2013 # last hindcast year (usually the forecast year -1) + +leadtime.week<-c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset +n.members<-4 # number of hindcast members + +my.score.name=c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skills scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr + +veri.month=1 #1:12 # select the month(s) you want to compute the chosen skill scores + +my.pvalue=0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) +conf.level=c(0.05, 0.95)# percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed) +int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) +int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) + +boot=TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores +n.boot=20 # number of resamples considered in the bootstrapping + +n.cpus=8 # number of cpu used by parApply for the calculation of skill scores (our desktop has 4 cpus but 8 virtual ones, so it's better to specify 8) + # for parallelizing the apply function for the EnsCorr computation, no cores are selected because the input array is big (~500 GB) + # when this script is run with slurm (in amdahl or in moore) with sbatch or in the interactive mode, it should uses exactly 8 physical cores. +max.n.el=10000000 # maximum number of elements in an array (each element of type 'numeric' occupies 8 byte,so max.n.el*8/1000000 is the maximum size in MB you want to use) + # 10000000 is the optimal value for 8 GB machines + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +my.lat=55.3197120 +my.lon=0.5625 + +# You might also want to run the job on another workstation. This is the possible list of available workstation: (please check before that nobody is using it!!!) 
+# 300 Vero +# 305 Workstation in front of Albert (usually free) +# 310 Dani +# 311 Nicola +# 312 Doo Young + +###################################### Derived variables ################################## + +#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps +n.categ<-1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used) +col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors + +sdates.seq<-weekly.seq(yr1,mes,day) # load the sequence of dates corresponding to all the thursday of the year +n.leadtimes<-length(leadtime.week) +n.yrs.hind<-yr2.hind-yr1.hind+1 +my.month.short<-substr(my.month,1,3) + +# Monthly Startdates for 2014 reforecasts: (in future you can modify it to work for a generic year) +startdates.monthly<-list() +startdates.monthly[[1]]<-1:5 +startdates.monthly[[2]]<-6:9 +startdates.monthly[[3]]<-10:13 +startdates.monthly[[4]]<-14:17 +startdates.monthly[[5]]<-18:22 +startdates.monthly[[6]]<-23:26 +startdates.monthly[[7]]<-27:31 +startdates.monthly[[8]]<-32:35 +startdates.monthly[[9]]<-36:39 +startdates.monthly[[10]]<-40:44 +startdates.monthly[[11]]<-45:48 +startdates.monthly[[12]]<-49:52 + +## extract geographic coordinates; do only ONCE for each preducton system and then save the lat/lon in a coordinates.RData and comment these rows to use function load() below: +#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind', +# obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + +#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +load(paste0(workdir,'/coordinates.RData')) +n.lon <- length(lons) +n.lat <- length(lats) +n.lonr <- n.lon-ceiling(length(lons[lons<180 & lons > 0])) +la<-rev(lats) +lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a chosen period # +######################################################################################### +# +# Remember that before computing the skill scores, you have to create and save the anomalies running once the preformatting part at the end of this script. +# + +for(month in veri.month){ + month=1 # for the debug + my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name=my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates<-length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + n.sub <- prod(hind.dim)/max.n.el + n.sub <- ceiling(n.sub) # number of sub-arrays in which to split the hindcast and rean.data + n.lon <- tail(hind.dim,1) # number of longitude elements + sub.size <- floor(n.lon/n.sub) # number of elements in the last dimension (lon) of each subarray rounded to the lower integer + if(sub.size<=1) stop("Subarrays too small. 
Increase ten times the value of max.n.el") + n.sub <- n.lon %/% sub.size # take only the integer part of the ratio , i.e: it is equal to floor(n.lon/sub.size) + sub.size.last <- n.lon %% n.sub # number of additional elements of the last subarray (if >0) + + my.FairRpss<-my.FairCrpss<-my.EnsCorr<-my.PValue<-array(NA,c(n.leadtimes,n.lat,n.lon)) + if(boot) my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(boot) my.FairRpssConf<-my.FairCrpssConf<-array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + + cat('Subarray n. ') + + for(s in 1:n.sub){ # EnsCorr, FairRpss and FairCrpss calculation: + #s=1 # for the debug + cat(paste0(s,'/',n.sub,' ')) + + if(s==n.sub){ add.last <- sub.size.last } else { add.last <- 0 } # because the last subarray is longer than the others, if sub.size.last>0 + + anom.hindcast.sub<-anom.hindcast.sub.sampled<-array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, sub.size + add.last)) + anom.rean.sub<-anom.hindcast.mean.sub<-anom.rean.sub.sampled<-array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, sub.size + add.last)) + + for(startdate in my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + # Load reanalysis data: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + anom.rean<-drop(anom.rean) + anom.rean.sub[my.time.interv,,,]<-anom.rean[,,,my.interv] + + # Load hindcast data: + if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss" || any(my.score.name=="RelDiagr"))){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast<-drop(anom.hindcast) + anom.hindcast.sub[,my.time.interv,,,]<-anom.hindcast[,,,,my.interv] + rm(anom.hindcast,anom.rean) + } + + if(any(my.score.name=="EnsCorr")){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data + anom.hindcast.mean<-drop(anom.hindcast.mean) + anom.hindcast.mean.sub[my.time.interv,,,]<-anom.hindcast.mean[,,,my.interv] + rm(anom.hindcast.mean) + } + + } + + + if(any(my.score.name=="FairRpss")){ + if(!boot){ + my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)$rpss + my.FairRpss[,,my.interv]<-my.FairRpss.sub + rm(my.FairRpss.sub) + + } else { # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.sub.sampled[,y,,,] <- anom.hindcast.sub[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.sub.sampled[y,,,] <- anom.rean.sub[yrs.sampled[y],,,] + + my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub.sampled, obs=anom.rean.sub.sampled, + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)$rpss + my.FairRpssBoot[b,,,my.interv]<-my.FairRpss.sub + rm(my.FairRpss.sub) + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,my.interv] <- apply(my.FairRpssBoot[,,,my.interv], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrpss.sub <- veriApply("FairCrpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)$crpss + my.FairCrpss[,,my.interv]<-my.FairCrpss.sub + rm(my.FairCrpss.sub) + + } else { # bootstrapping: + + for(b in 1:n.boot){ + print(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.sub.sampled[,y,,,] <- anom.hindcast.sub[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.sub.sampled[y,,,] <- anom.rean.sub[yrs.sampled[y],,,] + + my.FairCrpss.sub <- veriApply("FairCrpss", fcst=anom.hindcast.sub.sampled, obs=anom.rean.sub.sampled, + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)$crpss + my.FairCrpssBoot[b,,,my.interv] <- my.FairCrpss.sub + rm(my.FairCrpss.sub) + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,my.interv] <- apply(my.FairCrpssBoot[,,,my.interv], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.sub<-my.PValue.sub<-array(NA,c(n.leadtimes,n.lat,sub.size+add.last)) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:(sub.size+add.last)){ + my.EnsCorr.sub[i,j,k]<-cor(anom.hindcast.mean.sub[,i,j,k],anom.rean.sub[,i,j,k], use="complete.obs") + + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.sub[i,j,k]<-cor.test(anom.hindcast.mean.sub[,i,j,k],anom.rean.sub[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,my.interv]<-my.EnsCorr.sub + my.PValue[,,my.interv]<-my.PValue.sub + + rm(anom.hindcast.sub,anom.hindcast.mean.sub,my.EnsCorr.sub,my.PValue.sub) + } + + rm(anom.rean.sub) + gc() + #mem() + + } # next s (subarray) + + + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.sub<-array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.sub<-array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data of next startdate: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + 
anom.rean<-drop(anom.rean)
+      anom.rean.sub[my.time.interv,,]<-anom.rean[,lead,,]
+
+      # Load hindcast data of next startdate:
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+      anom.hindcast.sub[,my.time.interv,,]<-anom.hindcast[,,lead,,]
+
+      rm(anom.rean,anom.hindcast)
+    }
+
+    anom.hindcast.sub.perm<-aperm(anom.hindcast.sub,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+    gc()
+
+    cat('Computing the Reliability Diagram. Please wait... ')
+    ens.sub<-apply(anom.hindcast.sub.perm, c(3,4), convert2prob, prob<-my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+    obs.sub<-apply(anom.rean.sub,c(2,3), convert2prob, prob<-my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+
+    ens.sub.prob<-apply(ens.sub, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!!
+    obs.sub.prob<-apply(obs.sub, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # it does not need parallelization: it only takes 10s
+
+    if(is.null(int.lat)) int.lat <- 1:n.lat # if there is no region selected, select the whole world
+    if(is.null(int.lon)) int.lon <- 1:n.lon
+
+    int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+    my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.sub.prob[int1,int.lat,int.lon], obs.sub.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # 1st tercile
+
+    int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+    my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.sub.prob[int2,int.lat,int.lon], obs.sub.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+    int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+    my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.sub.prob[int3,int.lat,int.lon], obs.sub.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+    cat('done\n')
+    #print(my.RelDiagr) # for debugging
+
+    rm(anom.rean.sub,anom.hindcast.sub,ens.sub,obs.sub,ens.sub.prob,obs.sub.prob)
+    gc()
+
+  } # next lead
+
+} # close if on RelDiagr
+
+if(!boot){
+  if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+  if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+  if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+} else { # bootstrapping output:
+  if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+  if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+}
+
+} # next m (month)
+
+
+if(slurm==FALSE){
+###########################################################################################
+# Visualize and save the score maps for one score and period                              #
+########################################################################################### + +# run it only after computing and saving all the skill score data: + +my.months=1 #9:12 # choose the month(s) to plot and save + +for(month in my.months){ + #month=2 # for the debug + startdate.name.map<-my.month[month] # i.e:"January" + + for(my.score.name.map in my.score.name){ + #my.score.name.map='EnsCorr' # for the debug + + if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr } + if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss } + if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss } + if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr } + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss<-c(-10,seq(-1,1,by=0.1)) + col.rpss<-colorRampPalette(col)(length(brk.rpss)-1) + + my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differenciate between indexes + my.col<-col.rpss + + for(lead in 1:n.leadtimes){ + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map!="RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600) + + layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2)) + par(oma=c(1,1,4,1)) + + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F) + my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + title(my.title,line=0.5,outer=T) + + ColorBar(my.brk, cols=my.col, vert=T, cex=1.4) + + if(my.score.name.map=="EnsCorra"){ #add a small grey point if the corr.is significant: + + # arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon):
+          my.PValue.rev<-my.PValue
+          my.PValue.rev[lead,,]<-my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) { # significant at the my.pvalue level
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+# Composite map of the four skill scores                                                      #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e:"January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+###############################################################################################
+# Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate     #
+###############################################################################################
+
+# Run it only once at the beginning:
+
+my.startdates=1:52 # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. 
+ +# Run it only once at the beginning: + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=30 # number of years +M=50 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + +obs<-convert2prob(anom.rean,prob<-my.prob) +#mean(obs[,1]) # check if it is equal to 0.333 for K=3 + +ens<-convert2prob(anom.hind,prob<-my.prob) + +# climatological forecast: 
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample with no replacement of the hindcast +ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob) +#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4) + +# alternative definition of climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing): +#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross validation +anom.rean.clim<-t(array(anom.rean,c(N,N))) # function used by convert2prob when ref is not specified; it is ~ equal to the above function ClimEns, it only has a column more +ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob.table based on the climatological ensemble of observations + +p<-count2prob(ens) # we should add the option type=4, in not the sum for each year is not 100%!!! +y<-count2prob(obs) # we should add the option type=4, in not the sum for each year is not 100%!!! +ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F) + +### computation of verification scores: + +Rps<-round(mean(EnsRps(ens,obs)),4) +FairRps<-round(mean(FairRps(ens,obs)),4) +#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of Rps +#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of FairRps + +Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4) +FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4) +FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4) + +#FairRps.clim.stable<-0.44444444 +#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of Rps.clim +#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim + +Rpss<-round(1-(Rps/Rps.clim),4) +#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes) +Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs +#Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss,4) # using their climatological forecast +#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # sale igual a ~0.45 per ogni hindcast; é diverso sia da Rps.clim che da Rps.clim per N-> +oo (0.555) + +Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs +Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs + +FairRpss<-round(1-(FairRps/FairRps.clim),4) +#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes) +#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps +#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, 
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it is equal to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; equal to Rpss.specs
+#Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss,4) # using its internal climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # it comes out ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim in the limit N -> +Inf (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # the same, but providing the obs-based clim. forecast
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # letting veriApply build its own climatological forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its internal climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # it comes out ~0.55; it differs both from FairRps.clim and from FairRps.clim in the limit N -> +Inf (0.444)
+
+cat(paste0('n.memb: ',M,' n.yrs: ',N,' : ',FairRps,' : ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' : ',Rps,' : ',Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ',M,' n.yrs: ',N,' : ',FairRps,' : ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' : ',Rps,' : ',Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ',M,' n.yrs: ',N,' : ',FairRps,' : ',FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # a very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
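+
+# A minimal sketch of the aperm() trick mentioned above (toy numbers, unrelated to the
+# datasets): after moving the two dimensions to merge into the trailing positions,
+# array() can collapse them into one because R stores arrays in column-major order:
+x <- array(1:24, c(2,3,4))               # dims [i,j,k]
+y <- array(aperm(x, c(1,3,2)), c(2,4*3)) # dims [i, k*j]: k and j merged into one
+all(y[,1:4] == x[,1,])                   # TRUE: the first 4 columns are the j=1 slab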
+
+# when you have, for each leadtime, all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of the point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load the obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+                       obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+                       leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+# (the my.points.exp.* equivalents are assumed to have been computed in the same way from the hindcast)
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the reanalysis climatology loading only 1 file out of every 4 (it's quick, but it needs all the files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate every 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are filled by reusing the overlapping leadtimes:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate  leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week)     1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] #   1        a b c d  <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] #   2        b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] #   3        c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] #   4        d e f g
+  }                                                       #   5        e f g h
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.sub[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.sub[,i,j,k], y=anom.rean.sub[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                                Raster Animations                                    #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component, remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
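+
+# For completeness, base R (package utils) already ships a ready-made progress bar,
+# which is the idiomatic alternative to the two hand-rolled loops above:
+pb <- txtProgressBar(min=0, max=10, style=3)
+for(i in 1:10) { Sys.sleep(0.2); setTxtProgressBar(pb, i) }
+close(pb)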
+
+
+#######################################################################################
+#                           Load large datasets with ff                               #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # note: 'list' takes the object names as character strings
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # in the real case this array doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# wrapper that loads the ff objects from disk and runs the skill score on them
+# (renamed so that it doesn't shadow the veriApplyBig() function sourced above):
+run.veriApplyBig <- function(my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind") # created directly on disk
+str(anom.hind)
+dim(anom.hind)
+
+# alternative: create the array in memory first, then convert it to ff:
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # in the real case this copy doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case, but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
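+
+# A self-contained round trip of the ff pattern explored in this section (the path and
+# array sizes are illustrative, and mean() stands in for the real veriApplyBig() call):
+library(ff)
+demo.path <- tempfile() # stand-in for a file on /scratch
+x <- ff(rnorm(1e6), dim=c(100,100,100), vmode="double", filename=demo.path) # lives on disk
+m1 <- mean(x[,,1])               # subscripting reads only the requested chunk into RAM
+ffsave(x, file=demo.path)        # writes demo.path.ffData + demo.path.RData
+close(x); rm(x)
+ffload(file=demo.path, list="x") # restores the (closed) ff object 'x'
+open(x)
+m2 <- mean(x[,,1])               # identical to m1
+close(x); delete(x); rm(x)       # delete() also removes the backing file from disk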
+
+
+
+#########################################################################################
+# Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff     #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the package ff (storing the arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+ #month=1 # uncomment for debugging only (otherwise it overrides the loop index)
+ my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+ startdate.name=my.month[month] # name given to the period of the selected startdates, i.e. "January"
+ n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+ print(paste0("Computing Skill Scores for ",startdate.name))
+
+ if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # the reanalysis data is not converted to ff because it is a small array
+
+ if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+ if(bigdata)  anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+ if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+ for(startdate in my.startdates){
+
+   pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded + 1
+   my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+   load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+   if(!boot) {
+     anom.rean<-drop(anom.rean)
+     anom.rean.big[my.time.interv,,,] <- anom.rean
+   }
+
+   if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+     load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load the hindcast data
+     anom.hindcast<-drop(anom.hindcast)
+
+     if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+     if(boot)  anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+     rm(anom.hindcast, anom.rean)
+   }
+   gc()
+ }
+
+ if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+   if(!boot) {
+     if(any(my.score.name=="FairRpss"))  my.FairRpss  <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                      prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+     if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+   } else {
+     # bootstrapping:
+     my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+     if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+     for(b in 1:n.boot){
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
+
+
+#########################################################################################
+# Calculate and save the FairRpss for a chosen period using ff and Load()               #
+#########################################################################################
+
+ anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+ anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+ for(startdate in my.startdates){
+   pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded + 1
+   my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+   #anom.rean<-drop(anom.rean)
+   #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+   my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+   anom.hindcast <- drop(Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                     obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                     output = 'lonlat', configfile = file_path, method='distance-weighted' )$mod) # NB: raw hindcast values; the climatology still has to be removed, as in the sections above
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load the hindcast data from file instead
+   #anom.hindcast<-drop(anom.hindcast)
+
+   anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+   rm(anom.hindcast)
+ }
+
+ # NB: anom.rean.big is still empty at this point; the reanalysis anomalies also have to be loaded, e.g. from the RData files above:
+ my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+ save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+} # close if on slurm
diff --git a/old/SkillScores_v10.R b/old/SkillScores_v10.R
new file mode 100644
index 0000000000000000000000000000000000000000..88eab97d302d772378a5dfcd17eefd279a47965b
--- /dev/null
+++ b/old/SkillScores_v10.R
@@ -0,0 +1,495 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v10.R [chunk]
+#
+# e.g., to run only chunk number 3, write:
+#
+# Rscript SkillScores_v10.R 3
+#
+# if you don't specify any argument, or if you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
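+#
+# e.g., to launch all 12 monthly chunks in parallel from bash (one R process per month;
+# the argument is the month number, see the 'chunk.month' parsing below):
+#
+#   for m in {1..12}; do Rscript SkillScores_v10.R $m & done
+#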
+ +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(easyVerification) # for veriApply() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) print("Running the script in a sequential way") +if(length(args) > 1) stop("Only one argument is required") + +chunk <- ifelse(length(args) == 1 ,TRUE, FALSE) # set variable 'chunk' to TRUE if we are running the script from the command line with 1 argument, FALSE otherwise + +if(chunk) chunk.month <- as.integer(args[1]) # number of the month to run in this script (if chunk == TRUE) + +########################################################################################## +# User's settings # +########################################################################################## + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +# path of the weekly var rean data: +#rean.dir <- '/esarchive/old-files/recon_ecmwf_erainterim/weekly_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' + +rean.dir <- '/esnas/recon/ecmwf/erainterim/weekly_mean/$VAR_NAME$-ecmwf_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.name <- "ERA-Interim" + +# path of the monthly forecast system files: +forecast.year <- 2014 # starting year of the weekly sequence of the forecasts +forecast.dir <- paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +mes <- 1 # starting forecast month (1: january, 2: february, etc.) 
+day <- 2 # starting forecast day
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.prob <- c(1/3,2/3) # quantiles used for the FairRPSS and the RelDiagr (usually they are the terciles)
+#conf.level <- c(0.025, 0.975) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+
+int.lat <- NULL #1:160 # latitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+n.boot <- 1000 # number of resamples considered in the bootstrapping of the FairRPSS and the FairCRPSS
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+#my.lat <- 55.3197120
+#my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecast categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+#my.month.short <- substr(my.month,1,3)
+
+## extract the geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows out to use the load() call below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                      nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format the geographic coordinates to use with PlotEquiMap:
+#load(paste0(workdir,'/coordinates.RData'))
+#n.lon <- length(lons)
+#n.lat <- length(lats)
+#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+# load only 1 year of weekly rean data from the reanalysis to get lat and lon:
+data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)),
+                  obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)),
+                  nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+lons <- data.rean$lon
+lats <- data.rean$lat
+n.lon <- length(lons)
+n.lat <- length(lats)
+my.grid<-paste0('r',n.lon,'x',n.lat)
+
+#file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+#########################################################################################
+# Calculate and save up to all the chosen Skill Scores for a selected period            #
+#########################################################################################
+# if we run the script from the terminal with an argument, it only computes the month specified in the first argument and saves the results for that month:
+if(chunk) veri.month <- chunk.month
+
+for(month in veri.month){
+  #month=1 # for debugging
+
+  # select the startdates (weeks) you want to compute:
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5)
+
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e. "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- array(NA,c(n.leadtimes, n.lat,n.lon))
+  my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  #my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+  my.FairRpss.pvalue <- my.FairCrpss.pvalue <- my.EnsCorr.pvalue <- array(NA, c(n.leadtimes, n.lat, n.lon))
+
+  anom.rean.chunk <- anom.hindcast.mean.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.rean.clim <- array(NA, c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.hindcast.chunk <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+  for(startdate in my.startdates){
+    pos.startdate <- which(startdate == my.startdates) # count of the number of startdates already loaded + 1
+    my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+    # load the weekly rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean
+    data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)),
+                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                      nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=1)
+
+    # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+    # Mean of the 20 years (for each point and leadtime):
+    clim.rean <- apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average of all years for each leadtime and pixel
+    clim.rean <- InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values along the inserted dimensions to be able to calculate the anomalies #[1,1,20,4,320,640]
+    anom.rean <- data.rean$mod - clim.rean
+    #anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+    rm(data.rean, clim.rean)
+    gc()
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    #anom.rean <- drop(anom.rean)
+    anom.rean.chunk[my.time.interv,,,] <- anom.rean
+
+    # Load the hindcast data:
+    print(paste0('Computing forecast anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+    # Load the weekly subseasonal data in /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate it to the same ERA-Interim resolution (512x256):
+    data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)),
+                          obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)),
+                          nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+    # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+    # Mean of the 4 members of the hindcasts and of the 20 years (for each point and leadtime):
+    clim.hindcast <- apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average of all years and members for each leadtime and pixel
+    clim.hindcast <- InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. the anomalies
+    anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+    rm(data.hindcast,clim.hindcast)
+    gc()
+
+    # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640]
+    anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+    gc()
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+    anom.hindcast <- drop(anom.hindcast)
+    anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast
+
+    rm(anom.hindcast, anom.rean)
+    gc()
+
+    anom.hindcast.mean <- drop(anom.hindcast.mean)
+    anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean
+
+    rm(anom.hindcast.mean)
+    gc()
+
+  } # end load data and conversion to anomalies
+
+
+  if(any(my.score.name=="FairRpss")){
+    #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+    my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+    #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+    my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum)
+
+    # the probabilities for the climatological ensemble can be set without using convert2prob, and they are the same for all points and leadtimes (33% for each tercile):
+    ens.ref <- array(33,c(n.startdates*n.yrs.hind,3))
+
+    # the observed probabilities vary from point to point and for each leadtime:
+    obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+
+    # reshape obs to be able to apply EnsRps(ens, obs) below:
+    obs2 <- InsertDim(obs,1,3)
+    obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+    obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+    obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to lose the dimension(s) with only 1 element
+
+    my.Rps.clim.chunk <-
apply(obs3,c(3,4,5), function(x){ EnsRps(ens.ref,t(x)) }) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum / my.Rps.clim.chunk.sum) + + rm(ens.ref, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + # bootstrapping: + for(b in 1:n.boot){ + cat(paste0("Bootstrapping FairRpss: resampling n. ",b,"/",n.boot,"\n")) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + #for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + #for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + + my.FairRps.sampled <- my.FairRps.chunk + my.Rps.clim.sampled <- my.Rps.clim.chunk + + for(y in 1:n.yrs) my.FairRps.sampled[y,,,] <- my.FairRps.chunk[yrs.sampled[y],,,] + for(y in 1:n.yrs) my.Rps.clim.sampled[y,,,] <- my.Rps.clim.chunk[yrs.sampled[y],,,] + + #my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + #ens <- array(33,c(n.startdates*n.yrs.hind,3)) + #obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + #obs2 <- InsertDim(obs,1,3) + #obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] + #my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.sampled.sum <- apply(my.FairRps.sampled,c(2,3,4), sum) + my.Rps.clim.sampled.sum <- apply(my.Rps.clim.sampled,c(2,3,4), sum) + my.FairRpss.sampled <- 1-(my.FairRps.sampled.sum/my.Rps.clim.sampled.sum) + + my.FairRpssBoot[b,,,] <- my.FairRpss.sampled + + gc() + } + + rm(my.FairRps.sampled, my.Rps.clim.sampled, my.FairRps.sampled.sum, my.Rps.clim.sampled.sum, my.FairRpss.sampled, yrs.sampled) + gc() + + my.FairRpssBootMean <- apply(my.FairRpssBoot,c(2,3,4),mean,na.rm=T) + + my.test.value <- 0 # my.FairRpss.chunk # test if the bootstrapped mean value is significantly different from this value or not + + my.FairRpss.diff1 <- my.FairRpssBootMean + abs(my.test.value - my.FairRpssBootMean) + my.FairRpss.diff2 <- my.FairRpssBootMean - abs(my.test.value - my.FairRpssBootMean) + + for(i in 1:dim(my.FairRpssBoot)[2]){ + for(j in 1:dim(my.FairRpssBoot)[3]){ + for(k in 1:dim(my.FairRpssBoot)[4]){ + extr1 <- which(my.FairRpssBoot[,i,j,k] > my.FairRpss.diff1[i,j,k]) + extr2 <- which(my.FairRpssBoot[,i,j,k] < my.FairRpss.diff2[i,j,k]) + if(my.test.value != 0) my.FairRpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot # two-tailed test + if(my.test.value == 0) my.FairRpss.pvalue[i,j,k] <- ifelse(my.FairRpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot) + + } + } + } + + rm(my.FairRpssBootMean, my.FairRpss.diff1, my.FairRpss.diff2, extr1, extr2) + gc() + + #which(my.FairRpss.pvalue > 0.05) # points where the FairRPSS is significant (i.e: whose FairRPSS is not significantly different from the population mean) + + # calculate the percentiles of the skill score in case you want to compute the confidence interval of the 95%: + #my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + } # close if on FairRpss + + if(any(my.score.name=="FairCrpss")){ + + #my.FairCrpss.chunk <- veriApply("FairCrpss", 
fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+
+    # Numerator:
+    my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+    my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+
+    # Denominator:
+    my.Crps.clim.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+    anom.hind.clim <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+
+    for(i in 1:n.leadtimes){
+      print(paste0("leadtime= ",i))
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          # extract a random sample without replacement from the hindcast:
+          #anom.hind.clim[,,i,j,k] <- array(sample(anom.hindcast.chunk[,,i,j,k],n.startdates*n.yrs.hind*n.members),c(n.startdates*n.yrs.hind,n.members))
+          anom.rean.clim[,,i,j,k] <- t(array(anom.rean.chunk[,i,j,k],c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind)))
+          my.Crps.clim.chunk[,i,j,k] <- EnsCrps((anom.rean.clim[,,i,j,k]), anom.rean.chunk[,i,j,k])
+        }
+      }
+    }
+
+    ### anom.hind.clim <- apply(anom.hindcast.chunk,c(3,4,5), sample)
+    ### anom.all.chunk <- abind(anom.hind.clim,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+    ### my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+    my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+
+    ## Skill Score:
+    my.FairCrpss.chunk <- 1 - (my.FairCrps.chunk.sum / my.Crps.clim.chunk.sum)
+
+    #rm(anom.all.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+
+    rm(my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+    gc()
+
+    ### bootstrapping:
+    for(b in 1:n.boot){
+      cat(paste0("Bootstrapping FairCrpss: resampling n. ",b,"/",n.boot,"\n"))
+
+      yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+      my.FairCrps.sampled <- my.FairCrps.chunk
+      my.Crps.clim.sampled <- my.Crps.clim.chunk
+
+      for(y in 1:n.yrs) my.FairCrps.sampled[y,,,] <- my.FairCrps.chunk[yrs.sampled[y],,,]
+      for(y in 1:n.yrs) my.Crps.clim.sampled[y,,,] <- my.Crps.clim.chunk[yrs.sampled[y],,,]
+
+      my.FairCrps.sampled.sum <- apply(my.FairCrps.sampled,c(2,3,4), sum)
+      my.Crps.clim.sampled.sum <- apply(my.Crps.clim.sampled,c(2,3,4), sum)
+
+      my.FairCrpss.sampled <- 1-(my.FairCrps.sampled.sum / my.Crps.clim.sampled.sum)
+
+      my.FairCrpssBoot[b,,,] <- my.FairCrpss.sampled
+
+      gc()
+    }
+
+    rm(my.FairCrps.sampled, my.Crps.clim.sampled, my.FairCrpss.sampled, yrs.sampled)
+    gc()
+
+    my.FairCrpssBootMean <- apply(my.FairCrpssBoot,c(2,3,4),mean,na.rm=T)
+
+    my.test.value <- 0 # my.FairCrpss.chunk # test whether the bootstrapped mean value is significantly different from this value or not
+
+    my.FairCrpss.diff1 <- my.FairCrpssBootMean + abs(my.test.value - my.FairCrpssBootMean)
+    my.FairCrpss.diff2 <- my.FairCrpssBootMean - abs(my.test.value - my.FairCrpssBootMean)
+
+    for(i in 1:dim(my.FairCrpssBoot)[2]){
+      for(j in 1:dim(my.FairCrpssBoot)[3]){
+        for(k in 1:dim(my.FairCrpssBoot)[4]){
+          extr1 <- which(my.FairCrpssBoot[,i,j,k] > my.FairCrpss.diff1[i,j,k])
+          extr2 <- which(my.FairCrpssBoot[,i,j,k] < my.FairCrpss.diff2[i,j,k])
+          if(my.test.value != 0) my.FairCrpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot
+          if(my.test.value == 0) my.FairCrpss.pvalue[i,j,k] <- ifelse(my.FairCrpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot)
+
+        }
+      }
+    }
+
+    rm(my.FairCrpssBootMean, my.FairCrpss.diff1, my.FairCrpss.diff2, extr1, extr2)
+    ###which(my.FairCrpss.pvalue > 0.05) # points where the FairCRPSS is significant (i.e.: whose FairCRPSS is not
+
+    # calculate the percentiles of the skill score if you want to measure the confidence interval:
+    #my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+
+  } # close if on FairCrpss
+
+  if(any(my.score.name=="EnsCorr")){
+    # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+    my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, n.lon))
+
+    for(i in 1:n.leadtimes){
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")
+          # option 'use' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+
+          my.EnsCorr.pvalue[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value
+
+        }
+      }
+    }
+
+    #my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk
+    #my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk
+
+    #rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk)
+  }
+
+  #rm(anom.rean.chunk)
+  gc()
+  #mem()
+
+  # the Reliability Diagram cannot be computed by splitting the data into chunks of grid points, so it is computed only if the script is run from the R interface or with no arguments:
+
+  if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split into groups of grid points, but it can be split by leadtime instead:
+    my.RelDiagr<-list()
+
+    for(lead in 1:n.leadtimes){
+      cat(paste0('leadtime n.: ',lead,'/',n.leadtimes,'\n'))
+
+      anom.hindcast.lead <- anom.hindcast.chunk[,,lead,,] # extract only the lead time we are interested in
+      anom.hindcast.chunk.perm <- aperm(anom.hindcast.lead,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+      gc()
+
+      cat('Computing the Reliability Diagram. Please wait... \n')
+      ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob = my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+      anom.rean.lead <- anom.rean.chunk[,lead,,]
+
+      cat('Computing the Reliability Diagram. Please wait...... \n')
+
+      obs.chunk<-apply(anom.rean.lead,c(2,3), convert2prob, prob = my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on
+      ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+      cat('Computing the Reliability Diagram. Please wait......... \n')
+
+      obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4))
+
+      if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, select the whole world
+      if(is.null(int.lon)) int.lon <- 1:n.lon
+
+      cat('Computing the Reliability Diagram. Please wait............ \n')
+
+      int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+      my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+      int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+      my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F)
+
+      int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+      my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F)
+
+      #print(my.RelDiagr) # for debugging
+      rm(ens.chunk, obs.chunk, ens.chunk.prob, obs.chunk.prob, int1, int2, int3)
+      gc()
+
+    } # next lead
+
+  } # close if on RelDiagr
+
+  print("Saving results...")
+
+  # save the results for the chunk:
+  save(my.FairRpss.chunk, my.FairRpss.pvalue, my.FairCrpss.chunk, my.FairCrpss.pvalue, my.EnsCorr.chunk, my.EnsCorr.pvalue, my.RelDiagr, my.FairRpssBoot, my.FairCrpssBoot, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, int.lat, int.lon, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  #if(any(my.score.name=="FairRpss")) save(my.FairRpss.chunk, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+  print("Saving results...done")
+
+} # next m (month)
+
+
+print("Finished!")
+ +#rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(easyVerification) # for veriApply() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) print("Running the script in a sequential way") +if(length(args) > 1) stop("Only one argument is required") + +chunk <- ifelse(length(args) == 1 ,TRUE, FALSE) # set variable 'chunk' to TRUE if we are running the script from the command line with 1 argument, FALSE otherwise + +if(chunk) chunk.month <- as.integer(args[1]) # number of the month to run in this script (if chunk == TRUE) + +########################################################################################## +# User's settings # +########################################################################################## + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +# path of the weekly var rean data: +#rean.dir <- '/esarchive/old-files/recon_ecmwf_erainterim/weekly_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' + +rean.dir <- '/esnas/recon/ecmwf/erainterim/weekly_mean/$VAR_NAME$-ecmwf_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.name <- "ERA-Interim" + +# path of the monthly forecast system files: +forecast.year <- 2014 # starting year of the weekly sequence of the forecasts +forecast.dir <- paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +mes <- 1 # starting forecast month (1: january, 2: february, etc.) 
+day <- 2 # starting forecast day + +yr1.hind <- 1994 #1994 # first hindcast year +yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1) + +leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset +n.members <- 4 # number of hindcast members + +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') # choose one or more skills scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr + +veri.month <- 1:12 # select the month(s) you want to compute the chosen skill scores + +my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) +#conf.level <- c(0.025, 0.975) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed) + +int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) +int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) + +n.boot <- 1000 # number of resamples considered in the bootstrapping of the FairRPSS and the FairCRPSS + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +###################################### Derived variables ############################################################################# + +#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps +n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used) + +# blue-yellow-red colors: +#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1])) +#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the thursday of the year +n.leadtimes <- length(leadtime.week) +n.yrs.hind <- yr2.hind-yr1.hind+1 +#my.month.short <- substr(my.month,1,3) + +## extract geographic coordinates; do only ONCE for each preducton system and then save the lat/lon in a coordinates.RData and comment these rows to use function load() below: +#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind', +# obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + +#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +#load(paste0(workdir,'/coordinates.RData')) +#n.lon <- length(lons) +#n.lat <- length(lats) +#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) +#la<-rev(lats) +#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + +# load only 1 year of weekly var rean data from reanalysis to get lat and lon: +data.rean <- Load(var = var.name, exp = 
list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)), + nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1) + +lons <- data.rean$lon +lats <- data.rean$lat +n.lon <- length(lons) +n.lat <- length(lats) +my.grid<-paste0('r',n.lon,'x',n.lat) + +#file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a selected period # +######################################################################################### +# if we run the script from the terminal with an argument, it only computes the month we specify in the second argument and save the results for that month +if(chunk) veri.month <- chunk.month + +for(month in veri.month){ + #month=1 # for the debug + + # select the startdates (weeks) you want to compute: + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) + + startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates <- length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + + my.FairRpss <- my.FairCrpss <- my.EnsCorr <- array(NA,c(n.leadtimes, n.lat,n.lon)) + my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + #my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + my.FairRpss.pvalue <- my.FairCrpss.pvalue <- my.EnsCorr.pvalue <- array(NA, c(n.leadtimes, n.lat, n.lon)) + + anom.rean.chunk <- anom.hindcast.mean.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.rean.clim <- array(NA, c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hindcast.chunk <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + for(startdate in my.startdates){ + pos.startdate <- which(startdate == my.startdates) # count of the number of stardates already loaded+1 + my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean + data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=1) + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean <- apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average of all years for each leadtime and pixel + clim.rean <- InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able 
to calculate the anomalies #[1,1,20,4,320,640] + anom.rean <- data.rean$mod - clim.rean + #anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + rm(data.rean, clim.rean) + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,,] <- anom.rean + + # Load hindcast data: + print(paste0('Computing forecast anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate them to the same ERA-Interim resolution (512x256) + data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast <- apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average of all years and members for each leadtime and pixel + clim.hindcast <- InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast + + rm(anom.hindcast, anom.rean) + gc() + + anom.hindcast.mean <- drop(anom.hindcast.mean) + anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean + + rm(anom.hindcast.mean) + gc() + + } # end load data and conversion to anomalies + + + if(any(my.score.name=="FairRpss")){ + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum) + + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens.ref <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to loose the dimension(s) with only 1 element + + my.Rps.clim.chunk <- 
+    my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens.ref,t(x)) })
+    my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum)
+
+    my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum / my.Rps.clim.chunk.sum)
+
+    rm(ens.ref, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+    gc()
+
+    # bootstrapping: resample the years with replacement and recompute the skill score n.boot times:
+    for(b in 1:n.boot){
+      cat(paste0("Bootstrapping FairRpss: resampling n. ",b,"/",n.boot,"\n"))
+      yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+      #for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+      #for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+
+      my.FairRps.sampled <- my.FairRps.chunk
+      my.Rps.clim.sampled <- my.Rps.clim.chunk
+
+      for(y in 1:n.yrs) my.FairRps.sampled[y,,,] <- my.FairRps.chunk[yrs.sampled[y],,,]
+      for(y in 1:n.yrs) my.Rps.clim.sampled[y,,,] <- my.Rps.clim.chunk[yrs.sampled[y],,,]
+
+      #my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+      #ens <- array(33,c(n.startdates*n.yrs.hind,3))
+      #obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)})
+      #obs2 <- InsertDim(obs,1,3)
+      #obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+      #obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+      #obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F]
+      #my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+      my.FairRps.sampled.sum <- apply(my.FairRps.sampled,c(2,3,4), sum)
+      my.Rps.clim.sampled.sum <- apply(my.Rps.clim.sampled,c(2,3,4), sum)
+      my.FairRpss.sampled <- 1-(my.FairRps.sampled.sum/my.Rps.clim.sampled.sum)
+
+      my.FairRpssBoot[b,,,] <- my.FairRpss.sampled
+
+      gc()
+    }
+
+    rm(my.FairRps.sampled, my.Rps.clim.sampled, my.FairRps.sampled.sum, my.Rps.clim.sampled.sum, my.FairRpss.sampled, yrs.sampled)
+    gc()
+
+    my.FairRpssBootMean <- apply(my.FairRpssBoot,c(2,3,4),mean,na.rm=T)
+
+    my.test.value <- 0 # my.FairRpss.chunk # test whether the bootstrapped mean value is significantly different from this value or not
+
+    my.FairRpss.diff1 <- my.FairRpssBootMean + abs(my.test.value - my.FairRpssBootMean)
+    my.FairRpss.diff2 <- my.FairRpssBootMean - abs(my.test.value - my.FairRpssBootMean)
+
+    for(i in 1:dim(my.FairRpssBoot)[2]){
+      for(j in 1:dim(my.FairRpssBoot)[3]){
+        for(k in 1:dim(my.FairRpssBoot)[4]){
+          extr1 <- which(my.FairRpssBoot[,i,j,k] > my.FairRpss.diff1[i,j,k])
+          extr2 <- which(my.FairRpssBoot[,i,j,k] < my.FairRpss.diff2[i,j,k])
+          if(my.test.value != 0) my.FairRpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot # two-tailed test
+          if(my.test.value == 0) my.FairRpss.pvalue[i,j,k] <- ifelse(my.FairRpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot)
+
+        }
+      }
+    }
+
+    rm(my.FairRpssBootMean, my.FairRpss.diff1, my.FairRpss.diff2, extr1, extr2)
+    gc()
+
+    #which(my.FairRpss.pvalue > 0.05) # points whose FairRPSS is NOT significantly different from zero (i.e., with no significant skill)
+
+    # calculate the percentiles of the skill score in case you want to compute the 95% confidence interval:
+    #my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+
+  } # close if on FairRpss
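+
+  # (illustrative note, not part of the original script: with my.test.value = 0 the p-value
+  # above is one-sided, i.e. the fraction of bootstrap resamples whose skill score has the
+  # opposite sign to the bootstrap mean; a toy example at a single grid point:)
+  # boot <- c(0.10, 0.15, -0.02, 0.08, 0.12) # five resampled skill scores
+  # mean(boot) > 0                           # TRUE, so the p-value is the share below zero:
+  # sum(boot < 0) / length(boot)             # 0.2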
+  if(any(my.score.name=="FairCrpss")){
+
+    #my.FairCrpss.chunk <- veriApply("FairCrpss", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+
+    # Numerator:
+    my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+    my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+
+    # Denominator: for each verification time, the climatological 'ensemble' is the whole set of observed anomalies of the same point and leadtime:
+    my.Crps.clim.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+    anom.hind.clim <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+
+    for(i in 1:n.leadtimes){
+      print(paste0("leadtime= ",i))
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          # extract a random sample with no replacement of the hindcast:
+          #anom.hind.clim[,,i,j,k] <- array(sample(anom.hindcast.chunk[,,i,j,k],n.startdates*n.yrs.hind*n.members),c(n.startdates*n.yrs.hind,n.members))
+          anom.rean.clim[,,i,j,k] <- t(array(anom.rean.chunk[,i,j,k],c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind)))
+          my.Crps.clim.chunk[,i,j,k] <- EnsCrps(anom.rean.clim[,,i,j,k], anom.rean.chunk[,i,j,k])
+        }
+      }
+    }
+
+    ### anom.hind.clim <- apply(anom.hindcast.chunk,c(3,4,5), sample)
+    ### anom.all.chunk <- abind(anom.hind.clim,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+    ### my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+    my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+
+    ## Skill Score:
+    my.FairCrpss.chunk <- 1 - (my.FairCrps.chunk.sum / my.Crps.clim.chunk.sum)
+
+    #rm(anom.all.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+
+    rm(my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+    gc()
+
+    ### bootstrapping: resample the years with replacement, as for the FairRpss:
+    for(b in 1:n.boot){
+      cat(paste0("Bootstrapping FairCrpss: resampling n. ",b,"/",n.boot,"\n"))
+
+      yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+      my.FairCrps.sampled <- my.FairCrps.chunk
+      my.Crps.clim.sampled <- my.Crps.clim.chunk
+
+      for(y in 1:n.yrs) my.FairCrps.sampled[y,,,] <- my.FairCrps.chunk[yrs.sampled[y],,,]
+      for(y in 1:n.yrs) my.Crps.clim.sampled[y,,,] <- my.Crps.clim.chunk[yrs.sampled[y],,,]
+
+      my.FairCrps.sampled.sum <- apply(my.FairCrps.sampled,c(2,3,4), sum)
+      my.Crps.clim.sampled.sum <- apply(my.Crps.clim.sampled,c(2,3,4), sum)
+
+      my.FairCrpss.sampled <- 1-(my.FairCrps.sampled.sum / my.Crps.clim.sampled.sum)
+
+      my.FairCrpssBoot[b,,,] <- my.FairCrpss.sampled
+
+      gc()
+    }
+
+    rm(my.FairCrps.sampled, my.Crps.clim.sampled, my.FairCrpss.sampled, yrs.sampled)
+    gc()
+
+    my.FairCrpssBootMean <- apply(my.FairCrpssBoot,c(2,3,4),mean,na.rm=T)
+
+    my.test.value <- 0 # my.FairCrpss.chunk # test whether the bootstrapped mean value is significantly different from this value or not
+
+    my.FairCrpss.diff1 <- my.FairCrpssBootMean + abs(my.test.value - my.FairCrpssBootMean)
+    my.FairCrpss.diff2 <- my.FairCrpssBootMean - abs(my.test.value - my.FairCrpssBootMean)
+
+    for(i in 1:dim(my.FairCrpssBoot)[2]){
+      for(j in 1:dim(my.FairCrpssBoot)[3]){
+        for(k in 1:dim(my.FairCrpssBoot)[4]){
+          extr1 <- which(my.FairCrpssBoot[,i,j,k] > my.FairCrpss.diff1[i,j,k])
+          extr2 <- which(my.FairCrpssBoot[,i,j,k] < my.FairCrpss.diff2[i,j,k])
+          if(my.test.value != 0) my.FairCrpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot
+          if(my.test.value == 0) my.FairCrpss.pvalue[i,j,k] <- ifelse(my.FairCrpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot)
+
+        }
+      }
+    }
+
+    rm(my.FairCrpssBootMean, my.FairCrpss.diff1, my.FairCrpss.diff2, extr1, extr2)
+    ###which(my.FairCrpss.pvalue > 0.05) # points whose FairCRPSS is NOT significantly different from zero (i.e., with no significant skill)
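+
+    # (illustrative sketch, not part of the original script: a bootstrap percentile confidence
+    # interval is an alternative to the p-value above, mirroring the commented my.FairCrpssConf
+    # line below; e.g. the central 95% interval of the resampled scores at every point:)
+    # ci <- apply(my.FairCrpssBoot, c(2,3,4), quantile, probs=c(0.025,0.975), type=8)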
+
+    # calculate the percentiles of the skill score if you want to measure the confidence interval:
+    #my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+
+  } # close if on FairCrpss
+
+  if(any(my.score.name=="EnsCorr")){
+    # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+    my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, n.lon))
+
+    for(i in 1:n.leadtimes){
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")
+          # the 'use' option of cor() is chosen from: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+
+          my.EnsCorr.pvalue[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value
+          # note: cor.test() has no 'use' argument (the extra argument is silently ignored), so any NA should be removed beforehand
+
+        }
+      }
+    }
+
+    #my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk
+    #my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk
+
+    #rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk)
+  }
+
+  #rm(anom.rean.chunk)
+  gc()
+  #mem()
+
+  # the Reliability Diagram cannot be computed by splitting the data into chunks of grid points, so it is computed only when the script is run from the R interface or with no arguments:
+
+  if(any(my.score.name=="RelDiagr")){ # the computation cannot be split in groups of grid points, but it can be split by leadtime instead:
+    my.RelDiagr <- list()
+
+    for(lead in 1:n.leadtimes){
+      cat(paste0('leadtime n.: ',lead,'/',n.leadtimes,'\n'))
+
+      anom.hindcast.lead <- anom.hindcast.chunk[,,lead,,] # extract only the lead time we are interested in
+      anom.hindcast.chunk.perm <- aperm(anom.hindcast.lead,c(2,1,3,4)) # swap the first two dimensions to put the years first (needed for convert2prob)
+      gc()
+
+      cat('Computing the Reliability Diagram. Please wait... \n')
+      ens.chunk <- apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob = my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+      anom.rean.lead <- anom.rean.chunk[,lead,,]
+
+      cat('Computing the Reliability Diagram. Please wait...... \n')
+
+      obs.chunk <- apply(anom.rean.lead,c(2,3), convert2prob, prob = my.prob) # the first n.startdates*n.yrs.hind elements belong to the first tercile, and so on
+      ens.chunk.prob <- apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+      cat('Computing the Reliability Diagram. Please wait......... \n')
+
+      obs.chunk.prob <- apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4))
+
+      if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, use the whole globe
+      if(is.null(int.lon)) int.lon <- 1:n.lon
+
+      cat('Computing the Reliability Diagram. Please wait............ \n')
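+
+      # (note, not part of the original script: my.RelDiagr below is filled as a flat list
+      # indexed by tercile and leadtime, element [[ (tercile-1)*n.leadtimes + lead ]];
+      # a hypothetical helper to retrieve one diagram:)
+      # get.reldiagr <- function(tercile, lead) my.RelDiagr[[ (tercile-1)*n.leadtimes + lead ]]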
+
+      int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+      my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+      int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+      my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F)
+
+      int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+      my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F)
+
+      #print(my.RelDiagr) # for debugging
+      rm(ens.chunk, obs.chunk, ens.chunk.prob, obs.chunk.prob, int1, int2, int3)
+      gc()
+
+    } # next lead
+
+  } # close if on RelDiagr
+
+  print("Saving results...")
+
+  # save the results for the chunk:
+  save(my.FairRpss.chunk, my.FairRpss.pvalue, my.FairCrpss.chunk, my.FairCrpss.pvalue, my.EnsCorr.chunk, my.EnsCorr.pvalue, my.RelDiagr, my.FairRpssBoot, my.FairCrpssBoot, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, int.lat, int.lon, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  #if(any(my.score.name=="FairRpss")) save(my.FairRpss.chunk, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+  print("Saving results...done")
+
+} # next month
+
+
+print("Finished!")
diff --git a/old/SkillScores_v11.R b/old/SkillScores_v11.R
new file mode 100644
index 0000000000000000000000000000000000000000..2aa0ae207e31c4c6f17242caa69e8e924d5b66e8
--- /dev/null
+++ b/old/SkillScores_v11.R
@@ -0,0 +1,510 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores.R
+#
+# e.g., to run only chunk number 3, write:
+#
+# Rscript SkillScores.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
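+#
+# (suggested usage, not part of the original script: since each chunk is a month and writes
+# its own output file, all twelve months can be run in parallel from the bash with:)
+#
+# for m in {1..12}; do Rscript SkillScores.R $m & done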
+ +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(easyVerification) # for veriApply() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) { print("Running the script in a sequential way"); nloadcores <- 8 } +if(length(args) == 1) { print("Running the script in a parallel way"); nloadcores <- 1 } +if(length(args) > 1) stop("Only one argument is required") + +chunk <- ifelse(length(args) == 1 ,TRUE, FALSE) # set variable 'chunk' to TRUE if we are running the script from the command line with 1 argument, FALSE otherwise + +if(chunk) chunk.month <- as.integer(args[1]) # number of the month to run in this script (if chunk == TRUE) + +########################################################################################## +# User's settings # +########################################################################################## + +# working dir where to store the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +# path of the weekly reanalysis data of the variable 'var' to study: +#rean.dir <- '/esarchive/old-files/recon_ecmwf_erainterim/weekly_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.dir <- '/esnas/recon/ecmwf/erainterim/weekly_mean/$VAR_NAME$-ecmwf_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.name <- "ERA-Interim" + +# path of the monthly forecast system files: +forecast.year <- 2016 #2014 # starting year of the weekly sequence of the forecasts +#forecast.dir <- paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') +#forecast.dir <- paste0('/esnas/exp/ecmwf/monthly_ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') +forecast.dir <- paste0('/esnas/exp/ecmwf/s2s-monthly_ensforhc/weekly_mean/$VAR_NAME$_f24h/',forecast.year,'$MONTH$$DAY$/$VAR_NAME$_$START_DATE$.nc') +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for predictions (to be used in map titles) + +var.name <- 'sfcWind' #'tas' # forecast variable to verify. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map <- "10-m wind speed" #'2m Temperature' #'10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +mes <- 1 # starting forecast month (1: january, 2: february, etc.) 
+day <- 4 #2 # starting forecast day
+
+# choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr:
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr')
+
+# number of resamples considered in the bootstrapping of the FairRPSS and the FairCRPSS (should be AT LEAST 1000, better 5000-10000):
+n.boot <- 5000
+
+yr1.hind <- 1996 #1994 # first hindcast year
+yr2.hind <- 2015 #2013 # last hindcast year (usually the forecast year - 1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 11 #4 # number of hindcast members
+
+veri.month <- 2 #1:12 # select the month(s) for which to compute the chosen skill scores
+
+rean.int <- TRUE # if TRUE, interpolate the reanalysis to the resolution of the model; if FALSE, interpolate the model to the resolution of the reanalysis
+
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually the terciles)
+#conf.level <- c(0.025, 0.975) # percentiles employed for the calculation of the confidence level of the Rpss and Crpss (can be more than 2 if needed)
+
+#int.lat <- NULL #1:160 # latitude position of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+#int.lon <- NULL #1:320 # longitude position of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+#my.lat <- 55.3197120
+#my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecast categories (usually 3, if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+pos.bis <- which(sdates.seq == paste0(forecast.year,"0229")) # check if there is a startdate on the 29th of February and return its position in the vector sdates.seq
+if(length(pos.bis) != 0) sdates.seq <- sdates.seq[-pos.bis] # if there is a startdate on the 29th of February, remove it.
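+
+# (illustrative notes, not part of the original script)
+# weekly.seq() comes from Rfunctions.R and is assumed to return the "YYYYMMDD" strings of the
+# 52 weekly startdates of forecast.year; a base-R sketch of the same sequence:
+# first.sdate <- as.Date(sprintf("%04d-%02d-%02d", forecast.year, mes, day))
+# format(seq(first.sdate, by = "7 days", length.out = 52), "%Y%m%d")
+# each startdate is then assumed to be substituted by Load() into the $START_DATE$ (and
+# var.name into the $VAR_NAME$) placeholders of rean.dir/forecast.dir, e.g.:
+# '/esnas/recon/ecmwf/erainterim/weekly_mean/sfcWind-ecmwf_f6h/sfcWind_19960104.nc'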
+ +n.leadtimes <- length(leadtime.week) +n.yrs.hind <- yr2.hind-yr1.hind+1 +#my.month.short <- substr(my.month,1,3) + +## extract geographic coordinates; do only ONCE for each preducton system and then save the lat/lon in a coordinates.RData and comment these rows to use function load() below: +#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind', +# obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + +#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +#load(paste0(workdir,'/coordinates.RData')) +#n.lon <- length(lons) +#n.lat <- length(lats) +#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) +#la<-rev(lats) +#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + +# load only 1 year of weekly var rean data from reanalysis to get lat and lon: +if(!rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)), + nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1) + +if(rean.int) data.rean <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)), + nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1) + +lons <- data.rean$lon +lats <- data.rean$lat +n.lon <- length(lons) +n.lat <- length(lats) +my.grid<-paste0('r',n.lon,'x',n.lat) + +#file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a selected period # +######################################################################################### +# if we run the script from the terminal with an argument, it only computes the month we specify in the second argument and save the results for that month +if(chunk) veri.month <- chunk.month + +for(month in veri.month){ + #month=1 # for the debug + + # select the startdates (weeks) you want to compute: + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) + + startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates <- length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + + my.FairRpss <- my.FairCrpss <- my.EnsCorr <- array(NA,c(n.leadtimes, n.lat,n.lon)) + #my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + my.FairRpss.pvalue <- my.FairCrpss.pvalue <- my.EnsCorr.pvalue <- array(NA, c(n.leadtimes, n.lat, n.lon)) + + anom.rean.chunk <- anom.hindcast.mean.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.rean.clim <- array(NA, c(n.startdates*n.yrs.hind, 
n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hindcast.chunk <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + for(startdate in my.startdates){ + pos.startdate <- which(startdate == my.startdates) # count of the number of stardates already loaded+1 + my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean + if(!rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=nloadcores) + + if(rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=nloadcores) + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean <- apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average of all years for each leadtime and pixel + clim.rean <- InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + anom.rean <- data.rean$mod - clim.rean + #anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + rm(data.rean, clim.rean) + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,,] <- anom.rean + + # Load hindcast data: + print(paste0('Computing forecast anomalies for startdate ', which(startdate == my.startdates),'/',length(my.startdates))) + + # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate them to the same ERA-Interim resolution (512x256) + if(rean.int) data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=nloadcores) + if(!rean.int) data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=nloadcores) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast <- apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average of all years and members for each leadtime and pixel + clim.hindcast <- InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. 
anomalies + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast + + rm(anom.hindcast, anom.rean) + gc() + + anom.hindcast.mean <- drop(anom.hindcast.mean) + anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean + + rm(anom.hindcast.mean) + gc() + + } # end load data and conversion to anomalies + + + if(any(my.score.name == "FairRpss")){ + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum) + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens.ref <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: [obs]= [3*n.years*n.startdates, n.leadtimes, lat, lon] + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to loose the dimension(s) with only 1 element + + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens.ref,t(x), format="members") }) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum / my.Rps.clim.chunk.sum) + + rm(ens.ref, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + ########################### bootstrapping ############################## + my.FairRpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) # define it only now because for n.boot=1000, it is a 2 GB object!!! + + for(b in 1:n.boot){ + cat(paste0("Bootstrapping FairRpss: resampling n. 
",b,"/",n.boot,"\n")) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + #for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + #for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + + my.FairRps.sampled <- my.FairRps.chunk + my.Rps.clim.sampled <- my.Rps.clim.chunk + + for(y in 1:n.yrs) my.FairRps.sampled[y,,,] <- my.FairRps.chunk[yrs.sampled[y],,,] + for(y in 1:n.yrs) my.Rps.clim.sampled[y,,,] <- my.Rps.clim.chunk[yrs.sampled[y],,,] + + #my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + #ens <- array(33,c(n.startdates*n.yrs.hind,3)) + #obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + #obs2 <- InsertDim(obs,1,3) + #obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] + #my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x),format="members") }) + + my.FairRps.sampled.sum <- apply(my.FairRps.sampled,c(2,3,4), sum) + my.Rps.clim.sampled.sum <- apply(my.Rps.clim.sampled,c(2,3,4), sum) + my.FairRpss.sampled <- 1-(my.FairRps.sampled.sum/my.Rps.clim.sampled.sum) + + my.FairRpssBoot[b,,,] <- my.FairRpss.sampled + + gc() + } + + rm(my.FairRps.sampled, my.Rps.clim.sampled, my.FairRps.sampled.sum, my.Rps.clim.sampled.sum, my.FairRpss.sampled, yrs.sampled) + gc() + + my.FairRpssBootMean <- apply(my.FairRpssBoot,c(2,3,4),mean,na.rm=T) + + my.test.value <- 0 # my.FairRpss.chunk # test if the bootstrapped mean value is significantly different from this value or not + + my.FairRpss.diff1 <- my.FairRpssBootMean + abs(my.test.value - my.FairRpssBootMean) + my.FairRpss.diff2 <- my.FairRpssBootMean - abs(my.test.value - my.FairRpssBootMean) + + for(i in 1:dim(my.FairRpssBoot)[2]){ + for(j in 1:dim(my.FairRpssBoot)[3]){ + for(k in 1:dim(my.FairRpssBoot)[4]){ + extr1 <- which(my.FairRpssBoot[,i,j,k] > my.FairRpss.diff1[i,j,k]) + extr2 <- which(my.FairRpssBoot[,i,j,k] < my.FairRpss.diff2[i,j,k]) + if(my.test.value != 0) my.FairRpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot # two-tailed test + if(my.test.value == 0) my.FairRpss.pvalue[i,j,k] <- ifelse(my.FairRpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot) + + } + } + } + + rm(my.FairRpssBoot, my.FairRpssBootMean, my.FairRpss.diff1, my.FairRpss.diff2, extr1, extr2) + gc() + + #which(my.FairRpss.pvalue > 0.05) # points where the FairRPSS is significant (i.e: whose FairRPSS is not significantly different from the population mean) + + # calculate the percentiles of the skill score in case you want to compute the confidence interval of the 95%: + #my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + } # close if on FairRpss + + if(any(my.score.name=="FairCrpss")){ + + #my.FairCrpss.chunk <- veriApply("FairCrpss", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + + #Numerador: + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + + # Denominador: + my.Crps.clim.chunk <- 
array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.clim <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + + for(i in 1:n.leadtimes){ + print(paste0("leadtime= ",i)) + for(j in 1:n.lat){ + for(k in 1:n.lon){ + # extract a random sample with no replacement of the hindcast: + #anom.hind.clim[,,i,j,k] <- array(sample(anom.hindcast.chunk[,,i,j,k],n.startdates*n.yrs.hind*n.members),c(n.startdates*n.yrs.hind,n.members)) + anom.rean.clim[,,i,j,k] <- t(array(anom.rean.chunk[,i,j,k],c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind))) + my.Crps.clim.chunk[,i,j,k] <- EnsCrps((anom.rean.clim[,,i,j,k]), anom.rean.chunk[,i,j,k]) + } + } + } + + ### anom.hind.clim <- apply(anom.hindcast.chunk,c(3,4,5), sample) + ### anom.all.chunk <- abind(anom.hind.clim,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + ### my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + + ## Skill Score: + my.FairCrpss.chunk <- 1 - (my.FairCrps.chunk.sum / my.Crps.clim.chunk.sum) + + #rm(anom.all.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + + rm(my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + ################################# bootstrapping ########################################### + my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) # define it only now, because for n.boot = 1000, it is a 2 GB object!!! + + for(b in 1:n.boot){ + cat(paste0("Bootstrapping FairCrpss: resampling n. ",b,"/",n.boot,"\n")) + + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + my.FairCrps.sampled <- my.FairCrps.chunk + my.Crps.clim.sampled <- my.Crps.clim.chunk + + for(y in 1:n.yrs) my.FairCrps.sampled[y,,,] <- my.FairCrps.chunk[yrs.sampled[y],,,] + for(y in 1:n.yrs) my.Crps.clim.sampled[y,,,] <- my.Crps.clim.chunk[yrs.sampled[y],,,] + + my.FairCrps.sampled.sum <- apply(my.FairCrps.sampled,c(2,3,4), sum) + my.Crps.clim.sampled.sum <- apply(my.Crps.clim.sampled,c(2,3,4), sum) + + my.FairCrpss.sampled <- 1-(my.FairCrps.sampled.sum / my.Crps.clim.sampled.sum) + + my.FairCrpssBoot[b,,,] <- my.FairCrpss.sampled + + gc() + } + + rm(my.FairCrps.sampled, my.Crps.clim.sampled, my.FairCrpss.sampled , yrs.sampled) + gc() + + my.FairCrpssBootMean <- apply(my.FairCrpssBoot,c(2,3,4),mean,na.rm=T) + + my.test.value <- 0 # my.FairCrpss.chunk # test if the bootstrapped mean value is significantly different from this value or not + + my.FairCrpss.diff1 <- my.FairCrpssBootMean + abs(my.test.value - my.FairCrpssBootMean) + my.FairCrpss.diff2 <- my.FairCrpssBootMean - abs(my.test.value - my.FairCrpssBootMean) + + for(i in 1:dim(my.FairCrpssBoot)[2]){ + for(j in 1:dim(my.FairCrpssBoot)[3]){ + for(k in 1:dim(my.FairCrpssBoot)[4]){ + extr1 <- which(my.FairCrpssBoot[,i,j,k] > my.FairCrpss.diff1[i,j,k]) + extr2 <- which(my.FairCrpssBoot[,i,j,k] < my.FairCrpss.diff2[i,j,k]) + if(my.test.value != 0) my.FairCrpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot + if(my.test.value == 0) my.FairCrpss.pvalue[i,j,k] <- ifelse(my.FairCrpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot) + + } + } + } + + rm(my.FairCrpssBoot, my.FairCrpssBootMean, my.FairCrpss.diff1, my.FairCrpss.diff2, extr1, extr2) + ###which(my.FairCrpss.pvalue > 0.05) # points where thw FairCRPSS is significant (i.e: whose FairRPSS not significantly different from the population mean) + + # calculate the percentiles of the skill score if you want 
to measure hte confidence interval: + #my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + } # close if on FairCrpss + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, n.lon)) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:n.lon){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.EnsCorr.pvalue[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + #my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + #my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + #rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + #rm(anom.rean.chunk) + gc() + #mem() + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + obs.chunk.prob <- ens.chunk.prob <- list() + + int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile + int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile + int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes,'\n')) + + anom.hindcast.lead <- anom.hindcast.chunk[,,lead,,] # extract only the lead time we are interested in + anom.hindcast.chunk.perm <- aperm(anom.hindcast.lead,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob) + gc() + + cat('Computing the Reliability Diagram. Please wait... \n') + ens.chunk <- apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob = my.prob) # its dimens. are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon] + anom.rean.lead <- anom.rean.chunk[,lead,,] + + cat('Computing the Reliability Diagram. Please wait...... \n') + + obs.chunk <- apply(anom.rean.lead,c(2,3), convert2prob, prob = my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on. + ens.chunk.prob[[lead]] <- apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !! + cat('Computing the Reliability Diagram. Please wait......... 
\n') + + obs.chunk.prob[[lead]] <- apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) + + #print(my.RelDiagr) # for debugging + rm(ens.chunk, obs.chunk) + gc() + + } # next lead + + } # close if on RelDiagr + + print("Saving results...") + + # save the results for the chunk: + save(my.FairRpss.chunk, my.FairRpss.pvalue, my.FairCrpss.chunk, my.FairCrpss.pvalue, my.EnsCorr.chunk, my.EnsCorr.pvalue, obs.chunk.prob, ens.chunk.prob, int1, int2, int3, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + # if you only compute the RelDiagr, save this: + #save(obs.chunk.prob, ens.chunk.prob, int1, int2, int3, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + #if(any(my.score.name=="FairRpss")) save(my.FairRpss.chunk, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData')) + + print("Saving results...done") + +} # next m (month) + + +print("Finished!") diff --git a/old/SkillScores_v11.R~ b/old/SkillScores_v11.R~ new file mode 100644 index 0000000000000000000000000000000000000000..8cdbb5eabb45d6afbaccf87559bfb9e59fc017f1 --- /dev/null +++ b/old/SkillScores_v11.R~ @@ -0,0 +1,510 @@ +######################################################################################### +# Sub-seasonal Skill Scores # +######################################################################################### +# you can run it from the terminal with the syntax: +# +# Rscript SkillScores_v7.R +# +# i.e: to run only the chunk number 3, write: +# +# Rscript SkillScores_v4.R 3 +# +# if you don't specify any argument, or you run it from the R interface, it runs all the chunks in a sequential way. +# Use this last approach only if the computational speed is not a problem. 
+ +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(easyVerification) # for veriApply() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) { print("Running the script in a sequential way"); nloadcores <- 8 } +if(length(args) == 1) { print("Running the script in a parallel way"); nloadcores <- 1 } +if(length(args) > 1) stop("Only one argument is required") + +chunk <- ifelse(length(args) == 1 ,TRUE, FALSE) # set variable 'chunk' to TRUE if we are running the script from the command line with 1 argument, FALSE otherwise + +if(chunk) chunk.month <- as.integer(args[1]) # number of the month to run in this script (if chunk == TRUE) + +########################################################################################## +# User's settings # +########################################################################################## + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional" + +# path of the weekly var rean data: +#rean.dir <- '/esarchive/old-files/recon_ecmwf_erainterim/weekly_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.dir <- '/esnas/recon/ecmwf/erainterim/weekly_mean/$VAR_NAME$-ecmwf_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.name <- "ERA-Interim" + +# path of the monthly forecast system files: +forecast.year <- 2016 #2014 # starting year of the weekly sequence of the forecasts +#forecast.dir <- paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') +#forecast.dir <- paste0('/esnas/exp/ecmwf/monthly_ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') +forecast.dir <- paste0('/esnas/exp/ecmwf/s2s-monthly_ensforhc/weekly_mean/$VAR_NAME$_f24h/',forecast.year,'$MONTH$$DAY$/$VAR_NAME$_$START_DATE$.nc') + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' #'tas' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map <- "10-m wind speed" #'2m Temperature' #'10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +mes <- 1 # starting forecast month (1: january, 2: february, etc.) 
+day <- 4 #2 # starting forecast day + +# choose one or more skills scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr: +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') + +# number of resamples considered in the bootstrapping of the FairRPSS and the FairCRPSS: +n.boot <- 1000 + +yr1.hind <- 1996 #1994 #1994 # first hindcast year +yr2.hind <- 2015 #2013 #2013 # last hindcast year (usually the forecast year -1) + +leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset +n.members <- 11 #4 # number of hindcast members + +veri.month <- 2 #1:12 # select the month(s) you want to compute the chosen skill scores + +rean.int <- TRUE # if TRUE, interpolate the reanalysis to the same resolution of the model; if FALSE, interpolate the model to the same res.of the reanalysis + +my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) +#conf.level <- c(0.025, 0.975) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed) + +#int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) +#int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +#my.lat <- 55.3197120 +#my.lon <- 0.5625 + +###################################### Derived variables ############################################################################# + +#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps +n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used) + +# blue-yellow-red colors: +#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1])) +#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the thursday of the year +pos.bis <- which(sdates.seq == paste0(forecast.year,"0229")) # find if there is a startdate at the 29th of february and return its position in the ector sdates.seq +if(l(pos.bis) != 0) sdates.seq <- sdates.seq[-pos.bis] # if there is a startdate at the 29th of february, remove it. 
+ +n.leadtimes <- length(leadtime.week) +n.yrs.hind <- yr2.hind-yr1.hind+1 +#my.month.short <- substr(my.month,1,3) + +## extract geographic coordinates; do only ONCE for each preducton system and then save the lat/lon in a coordinates.RData and comment these rows to use function load() below: +#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind', +# obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + +#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +#load(paste0(workdir,'/coordinates.RData')) +#n.lon <- length(lons) +#n.lat <- length(lats) +#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) +#la<-rev(lats) +#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + +# load only 1 year of weekly var rean data from reanalysis to get lat and lon: +if(!rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)), + nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1) + +if(rean.int) data.rean <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)), + nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1) + +lons <- data.rean$lon +lats <- data.rean$lat +n.lon <- length(lons) +n.lat <- length(lats) +my.grid<-paste0('r',n.lon,'x',n.lat) + +#file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a selected period # +######################################################################################### +# if we run the script from the terminal with an argument, it only computes the month we specify in the second argument and save the results for that month +if(chunk) veri.month <- chunk.month + +for(month in veri.month){ + #month=1 # for the debug + + # select the startdates (weeks) you want to compute: + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) + + startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates <- length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + + my.FairRpss <- my.FairCrpss <- my.EnsCorr <- array(NA,c(n.leadtimes, n.lat,n.lon)) + #my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + my.FairRpss.pvalue <- my.FairCrpss.pvalue <- my.EnsCorr.pvalue <- array(NA, c(n.leadtimes, n.lat, n.lon)) + + anom.rean.chunk <- anom.hindcast.mean.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.rean.clim <- array(NA, c(n.startdates*n.yrs.hind, 
n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hindcast.chunk <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + for(startdate in my.startdates){ + pos.startdate <- which(startdate == my.startdates) # count of the number of stardates already loaded+1 + my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean + if(!rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=nloadcores) + + if(rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=nloadcores) + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean <- apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average of all years for each leadtime and pixel + clim.rean <- InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + anom.rean <- data.rean$mod - clim.rean + #anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + rm(data.rean, clim.rean) + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,,] <- anom.rean + + # Load hindcast data: + print(paste0('Computing forecast anomalies for startdate ', which(startdate == my.startdates),'/',length(my.startdates))) + + # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate them to the same ERA-Interim resolution (512x256) + if(rean.int) data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=nloadcores) + if(!rean.int) data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=nloadcores) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast <- apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average of all years and members for each leadtime and pixel + clim.hindcast <- InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. 
anomalies + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast + + rm(anom.hindcast, anom.rean) + gc() + + anom.hindcast.mean <- drop(anom.hindcast.mean) + anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean + + rm(anom.hindcast.mean) + gc() + + } # end load data and conversion to anomalies + + + if(any(my.score.name == "FairRpss")){ + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum) + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens.ref <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: [obs]= [3*n.years*n.startdates, n.leadtimes, lat, lon] + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to loose the dimension(s) with only 1 element + + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens.ref,t(x), format="members") }) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum / my.Rps.clim.chunk.sum) + + rm(ens.ref, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + ########################### bootstrapping ############################## + my.FairRpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) # define it only now because for n.boot=1000, it is a 2 GB object!!! + + for(b in 1:n.boot){ + cat(paste0("Bootstrapping FairRpss: resampling n. 
",b,"/",n.boot,"\n")) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + #for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + #for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + + my.FairRps.sampled <- my.FairRps.chunk + my.Rps.clim.sampled <- my.Rps.clim.chunk + + for(y in 1:n.yrs) my.FairRps.sampled[y,,,] <- my.FairRps.chunk[yrs.sampled[y],,,] + for(y in 1:n.yrs) my.Rps.clim.sampled[y,,,] <- my.Rps.clim.chunk[yrs.sampled[y],,,] + + #my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + #ens <- array(33,c(n.startdates*n.yrs.hind,3)) + #obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + #obs2 <- InsertDim(obs,1,3) + #obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] + #my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x),format="members") }) + + my.FairRps.sampled.sum <- apply(my.FairRps.sampled,c(2,3,4), sum) + my.Rps.clim.sampled.sum <- apply(my.Rps.clim.sampled,c(2,3,4), sum) + my.FairRpss.sampled <- 1-(my.FairRps.sampled.sum/my.Rps.clim.sampled.sum) + + my.FairRpssBoot[b,,,] <- my.FairRpss.sampled + + gc() + } + + rm(my.FairRps.sampled, my.Rps.clim.sampled, my.FairRps.sampled.sum, my.Rps.clim.sampled.sum, my.FairRpss.sampled, yrs.sampled) + gc() + + my.FairRpssBootMean <- apply(my.FairRpssBoot,c(2,3,4),mean,na.rm=T) + + my.test.value <- 0 # my.FairRpss.chunk # test if the bootstrapped mean value is significantly different from this value or not + + my.FairRpss.diff1 <- my.FairRpssBootMean + abs(my.test.value - my.FairRpssBootMean) + my.FairRpss.diff2 <- my.FairRpssBootMean - abs(my.test.value - my.FairRpssBootMean) + + for(i in 1:dim(my.FairRpssBoot)[2]){ + for(j in 1:dim(my.FairRpssBoot)[3]){ + for(k in 1:dim(my.FairRpssBoot)[4]){ + extr1 <- which(my.FairRpssBoot[,i,j,k] > my.FairRpss.diff1[i,j,k]) + extr2 <- which(my.FairRpssBoot[,i,j,k] < my.FairRpss.diff2[i,j,k]) + if(my.test.value != 0) my.FairRpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot # two-tailed test + if(my.test.value == 0) my.FairRpss.pvalue[i,j,k] <- ifelse(my.FairRpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot) + + } + } + } + + rm(my.FairRpssBoot, my.FairRpssBootMean, my.FairRpss.diff1, my.FairRpss.diff2, extr1, extr2) + gc() + + #which(my.FairRpss.pvalue > 0.05) # points where the FairRPSS is significant (i.e: whose FairRPSS is not significantly different from the population mean) + + # calculate the percentiles of the skill score in case you want to compute the confidence interval of the 95%: + #my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + } # close if on FairRpss + + if(any(my.score.name=="FairCrpss")){ + + #my.FairCrpss.chunk <- veriApply("FairCrpss", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + + #Numerador: + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + + # Denominador: + my.Crps.clim.chunk <- 
+  if(any(my.score.name=="FairCrpss")){
+
+    #my.FairCrpss.chunk <- veriApply("FairCrpss", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+
+    # Numerator:
+    my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+    my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+
+    # Denominator:
+    my.Crps.clim.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+    #anom.hind.clim <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) # only needed by the commented random-sample reference below
+
+    for(i in 1:n.leadtimes){
+      print(paste0("leadtime= ",i))
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          # extract a random sample without replacement of the hindcast:
+          #anom.hind.clim[,,i,j,k] <- array(sample(anom.hindcast.chunk[,,i,j,k],n.startdates*n.yrs.hind*n.members),c(n.startdates*n.yrs.hind,n.members))
+          anom.rean.clim <- t(array(anom.rean.chunk[,i,j,k],c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind))) # climatological ensemble: every row is the full observed series
+          my.Crps.clim.chunk[,i,j,k] <- EnsCrps(anom.rean.clim, anom.rean.chunk[,i,j,k])
+        }
+      }
+    }
+
+    ### anom.hind.clim <- apply(anom.hindcast.chunk,c(3,4,5), sample)
+    ### anom.all.chunk <- abind(anom.hind.clim,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+    ### my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+    my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+
+    ## Skill Score:
+    my.FairCrpss.chunk <- 1 - (my.FairCrps.chunk.sum / my.Crps.clim.chunk.sum)
+
+    #rm(anom.all.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+    rm(my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+    gc()
+
+    ################################# bootstrapping ###########################################
+    my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) # define it only now, because for n.boot = 1000, it is a 2 GB object!!!
+
+    for(b in 1:n.boot){
+      cat(paste0("Bootstrapping FairCrpss: resampling n. ",b,"/",n.boot,"\n"))
+
+      yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+      my.FairCrps.sampled <- my.FairCrps.chunk
+      my.Crps.clim.sampled <- my.Crps.clim.chunk
+
+      for(y in 1:n.yrs) my.FairCrps.sampled[y,,,] <- my.FairCrps.chunk[yrs.sampled[y],,,]
+      for(y in 1:n.yrs) my.Crps.clim.sampled[y,,,] <- my.Crps.clim.chunk[yrs.sampled[y],,,]
+
+      my.FairCrps.sampled.sum <- apply(my.FairCrps.sampled,c(2,3,4), sum)
+      my.Crps.clim.sampled.sum <- apply(my.Crps.clim.sampled,c(2,3,4), sum)
+
+      my.FairCrpss.sampled <- 1-(my.FairCrps.sampled.sum / my.Crps.clim.sampled.sum)
+
+      my.FairCrpssBoot[b,,,] <- my.FairCrpss.sampled
+
+      gc()
+    }
+
+    rm(my.FairCrps.sampled, my.Crps.clim.sampled, my.FairCrpss.sampled, yrs.sampled)
+    gc()
+
+    my.FairCrpssBootMean <- apply(my.FairCrpssBoot,c(2,3,4),mean,na.rm=T)
+
+    my.test.value <- 0 # my.FairCrpss.chunk # test whether the bootstrapped mean value is significantly different from this value or not
+
+    my.FairCrpss.diff1 <- my.FairCrpssBootMean + abs(my.test.value - my.FairCrpssBootMean)
+    my.FairCrpss.diff2 <- my.FairCrpssBootMean - abs(my.test.value - my.FairCrpssBootMean)
+
+    my.FairCrpss.pvalue <- array(NA, c(n.leadtimes, n.lat, n.lon)) # allocate the p-value array (it is filled point by point below)
+
+    for(i in 1:dim(my.FairCrpssBoot)[2]){
+      for(j in 1:dim(my.FairCrpssBoot)[3]){
+        for(k in 1:dim(my.FairCrpssBoot)[4]){
+          extr1 <- which(my.FairCrpssBoot[,i,j,k] > my.FairCrpss.diff1[i,j,k])
+          extr2 <- which(my.FairCrpssBoot[,i,j,k] < my.FairCrpss.diff2[i,j,k])
+          if(my.test.value != 0) my.FairCrpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot # two-tailed test
+          if(my.test.value == 0) my.FairCrpss.pvalue[i,j,k] <- ifelse(my.FairCrpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot) # one-tailed test against zero
+        }
+      }
+    }
+
+    rm(my.FairCrpssBoot, my.FairCrpssBootMean, my.FairCrpss.diff1, my.FairCrpss.diff2, extr1, extr2)
+    ###which(my.FairCrpss.pvalue > 0.05) # points where the FairCRPSS is NOT significant (i.e: whose FairCRPSS is not significantly different from zero)
+
+    # calculate the percentiles of the skill score if you want to measure the confidence interval:
+    #my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
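+    # A toy sketch of the commented percentile interval above and of the bootstrap
+    # test used earlier (numbers are illustrative; kept commented so it does not run):
+    # boot.toy <- rnorm(n.boot, mean=0.2, sd=0.1)     # bootstrap replicates of one skill value
+    # quantile(boot.toy, probs=c(0.05,0.95), type=8)  # 90% percentile interval (type=8 is the median-unbiased estimator)
+    # mean(boot.toy < 0)                              # one-tailed p-value against zero, as in the loops above when the mean skill is positive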
+  } # close if on FairCrpss
+
+  if(any(my.score.name=="EnsCorr")){
+    # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+    my.EnsCorr.chunk <- my.EnsCorr.pvalue <- array(NA, c(n.leadtimes, n.lat, n.lon))
+
+    for(i in 1:n.leadtimes){
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")
+          # option 'use' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+
+          my.EnsCorr.pvalue[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value
+        }
+      }
+    }
+
+    #my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk
+    #my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk
+
+    #rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk)
+  }
+
+  #rm(anom.rean.chunk)
+  gc()
+  #mem()
+
+  # the Reliability Diagram cannot be computed splitting the data in chunks, so it is computed only if the script is run from the R interface or with no arguments:
+
+  if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but it can be split by leadtime instead:
+    obs.chunk.prob <- ens.chunk.prob <- list()
+
+    int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+    int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+    int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+
+    for(lead in 1:n.leadtimes){
+      cat(paste0('leadtime n.: ',lead,'/',n.leadtimes,'\n'))
+
+      anom.hindcast.lead <- anom.hindcast.chunk[,,lead,,] # extract only the lead time we are interested in
+      anom.hindcast.chunk.perm <- aperm(anom.hindcast.lead,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+      gc()
+
+      cat('Computing the Reliability Diagram. Please wait... \n')
+      ens.chunk <- apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob = my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+      anom.rean.lead <- anom.rean.chunk[,lead,,]
+
+      cat('Computing the Reliability Diagram. Please wait...... \n')
+
+      obs.chunk <- apply(anom.rean.lead,c(2,3), convert2prob, prob = my.prob) # the first n.startdates*n.yrs.hind elements belong to the first tercile, and so on.
+      ens.chunk.prob[[lead]] <- apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+      cat('Computing the Reliability Diagram. Please wait.........
\n') + + obs.chunk.prob[[lead]] <- apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) + + #print(my.RelDiagr) # for debugging + rm(ens.chunk, obs.chunk) + gc() + + } # next lead + + } # close if on RelDiagr + + print("Saving results...") + + # save the results for the chunk: + save(my.FairRpss.chunk, my.FairRpss.pvalue, my.FairCrpss.chunk, my.FairCrpss.pvalue, my.EnsCorr.chunk, my.EnsCorr.pvalue, obs.chunk.prob, ens.chunk.prob, int1, int2, int3, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + # if you only compute the RelDiagr, save this: + #save(obs.chunk.prob, ens.chunk.prob, int1, int2, int3, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + #if(any(my.score.name=="FairRpss")) save(my.FairRpss.chunk, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData')) + + print("Saving results...done") + +} # next m (month) + + +print("Finished!") diff --git a/old/SkillScores_v2.R b/old/SkillScores_v2.R new file mode 100644 index 0000000000000000000000000000000000000000..c9bfd8052fd297e4e798c2ea9831a5a184501d66 --- /dev/null +++ b/old/SkillScores_v2.R @@ -0,0 +1,1187 @@ + +slurm=TRUE # set it to TRUE only if you run the script wieh slurm + +#load libraries and functions: +library(s2dverification) +library(SpecsVerification) +library(easyVerification) +library(jpeg) +library(abind) +#library(ff) +#library(ffbase) +source('/scratch/Earth/ncortesi/RESILIENCE/Rfunctions.R') + +######################################################################################### +# User's settings # +######################################################################################### + +Mare=TRUE # if MareNostrum is TRUE, the script must be run on MareNostrum; if it is FALSE, on our workstations. + +workdir="/scratch/Earth/ncortesi/RESILIENCE" # working dir where to put the output maps and files 8in the /Data and /Maps subdirs +workdirMare="/gpfs/projects/bootstrapping" + +cfs.name='ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name='sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map='10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +yr1=2014 # starting year of the weekly sequence of the forecasts +mes=1 # starting forecast month (usually january) +day=2 # starting forecast day + +yr1.hind=1994 #1994 # first hindcast year +yr2.hind=2013 #2013 # last hindcast year (usually the forecast year -1) + +leadtime.week<-c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset +n.members<-4 # number of hindcast members + +my.score.name=c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skills scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr + +veri.month=1 #1:12 # select the month(s) you want to compute the chosen skill scores + +my.pvalue=0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) +conf.level=c(0.05, 0.95)# percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed) +int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) +int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) + +boot=TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores +n.boot=20 # number of resamples considered in the bootstrapping + +n.cpus=8 # number of cpu used by parApply for the calculation of skill scores (our desktop has 4 cpus but 8 virtual ones, so it's better to specify 8) + # for parallelizing the apply function for the EnsCorr computation, no cores are selected because the input array is big (~500 GB) + # when this script is run with slurm (in amdahl or in moore) with sbatch or in the interactive mode, it should uses exactly 8 physical cores. +max.n.el=10000000 # maximum number of elements in an array (each element of type 'numeric' occupies 8 byte,so max.n.el*8/1000000 is the maximum size in MB you want to use) + # 10000000 is the optimal value for 8 GB machines + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +my.lat=55.3197120 +my.lon=0.5625 + +# You might also want to run the job on another workstation. This is the possible list of available workstation: (please check before that nobody is using it!!!) 
+# 300 Vero +# 305 Workstation in front of Albert (usually free) +# 310 Dani +# 311 Nicola +# 312 Doo Young + +###################################### Derived variables ################################## + +#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps +n.categ<-1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used) +col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors + +sdates.seq<-weekly.seq(yr1,mes,day) # load the sequence of dates corresponding to all the thursday of the year +n.leadtimes<-length(leadtime.week) +n.yrs.hind<-yr2.hind-yr1.hind+1 +my.month.short<-substr(my.month,1,3) + +# Monthly Startdates for 2014 reforecasts: (in future you can modify it to work for a generic year) +startdates.monthly<-list() +startdates.monthly[[1]]<-1:5 +startdates.monthly[[2]]<-6:9 +startdates.monthly[[3]]<-10:13 +startdates.monthly[[4]]<-14:17 +startdates.monthly[[5]]<-18:22 +startdates.monthly[[6]]<-23:26 +startdates.monthly[[7]]<-27:31 +startdates.monthly[[8]]<-32:35 +startdates.monthly[[9]]<-36:39 +startdates.monthly[[10]]<-40:44 +startdates.monthly[[11]]<-45:48 +startdates.monthly[[12]]<-49:52 + +## extract geographic coordinates; do only ONCE for each preducton system and then save the lat/lon in a coordinates.RData and comment these rows to use function load() below: +#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind', +# obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + +#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +load(paste0(workdir,'/coordinates.RData')) +n.lon <- length(lons) +n.lat <- length(lats) +n.lonr <- n.lon-ceiling(length(lons[lons<180 & lons > 0])) +la<-rev(lats) +lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a chosen period # +######################################################################################### +# +# Remember that before computing the skill scores, you have to create and save the anomalies running once the preformatting part at the end of this script. +# + +for(month in veri.month){ + #month=1 # for the debug + my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name=my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates<-length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + n.sub <- prod(hind.dim)/max.n.el + n.sub <- ceiling(n.sub) # number of sub-arrays in which to split the hindcast and rean.data + n.lon <- tail(hind.dim,1) # number of longitude elements + sub.size <- floor(n.lon/n.sub) # number of elements in the last dimension (lon) of each subarray rounded to the lower integer + if(sub.size<=1) stop("Subarrays too small. 
Increase ten times the value of max.n.el") + n.sub <- n.lon %/% sub.size # take only the integer part of the ratio , i.e: it is equal to floor(n.lon/sub.size) + sub.size.last <- n.lon %% n.sub # number of additional elements of the last subarray (if >0) + + my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon)) + if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + + cat('Subarray n. ') + + for(s in 1:n.sub){ # EnsCorr, FairRpss and FairCrpss calculation: + #s=1 # for the debug + cat(paste0(s,'/',n.sub,' ')) + + if(s==n.sub){ add.last <- sub.size.last } else { add.last <- 0 } # because the last subarray is longer than the others, if sub.size.last>0 + + anom.hindcast.sub<-anom.hindcast.sub.sampled<-array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, sub.size + add.last)) + anom.rean.sub<-anom.hindcast.mean.sub<-anom.rean.sub.sampled<-array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, sub.size + add.last)) + my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + for(startdate in my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + anom.rean <- drop(anom.rean) + anom.rean.sub[my.time.interv,,,]<-anom.rean[,,,my.interv] + + # Load hindcast data: + if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss" || any(my.score.name=="RelDiagr"))){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.sub[,my.time.interv,,,]<-anom.hindcast[,,,,my.interv] + rm(anom.hindcast, anom.rean) + gc() + } + + if(any(my.score.name=="EnsCorr")){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data + anom.hindcast.mean<-drop(anom.hindcast.mean) + anom.hindcast.mean.sub[my.time.interv,,,]<-anom.hindcast.mean[,,,my.interv] + rm(anom.hindcast.mean) + gc() + } + + } + + + if(any(my.score.name=="FairRpss")){ + if(!boot){ + + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.sub <- veriApply("FairRps", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.sub, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + + my.Rps.clim.sub <- apply(obs3,c(3,4,5), function(x){ 
EnsRps(ens,t(x)) }) + + my.FairRps.sub.sum <- apply(my.FairRps.sub,c(2,3,4), sum) + my.Rps.clim.sub.sum <- apply(my.Rps.clim.sub,c(2,3,4), sum) + + my.FairRpss.sub <- 1-(my.FairRps.sub.sum/my.Rps.clim.sub.sum) + + my.FairRpss[,,my.interv] <- my.FairRpss.sub + rm(my.FairRpss.sub, ens, obs, obs2, obs3, my.FairRps.sub.sum, my.Rps.clim.sub.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.sub.sampled[,y,,,] <- anom.hindcast.sub[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.sub.sampled[y,,,] <- anom.rean.sub[yrs.sampled[y],,,] + + my.FairRps.sub <- veriApply("FairRps", fcst=anom.hindcast.sub.sampled, obs=anom.rean.sub.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.sub.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + my.Rps.clim.sub <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.sub.sum <- apply(my.FairRps.sub,c(2,3,4), sum) + my.Rps.clim.sub.sum <- apply(my.Rps.clim.sub,c(2,3,4), sum) + my.FairRpss.sub <- 1-(my.FairRps.sub.sum/my.Rps.clim.sub.sum) + + my.FairRpssBoot[b,,,my.interv]<-my.FairRpss.sub + rm(my.FairRpss.sub, ens, obs, obs2, obs3, my.FairRps.sub.sum, my.Rps.clim.sub.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,my.interv] <- apply(my.FairRpssBoot[,,,my.interv], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrps.sub <- veriApply("FairCrps", fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.sub <- abind(anom.hindcast.sub,anom.rean.sub, along=1) # merge exp and obs together to use apply: + my.Crps.clim.sub <- apply(anom.all.sub, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.sub.sum <- apply(my.FairCrps.sub,c(2,3,4), sum) + my.Crps.clim.sub.sum <- apply(my.Crps.clim.sub,c(2,3,4), sum) + my.FairCrpss.sub <- 1-(my.FairCrps.sub.sum/my.Crps.clim.sub.sum) + + my.FairCrpss[,,my.interv]<-my.FairCrpss.sub + rm(my.FairCrpss.sub, my.FairCrps.sub.sum, my.Crps.clim.sub.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.sub.sampled[,y,,,] <- anom.hindcast.sub[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.sub.sampled[y,,,] <- anom.rean.sub[yrs.sampled[y],,,] + + my.FairCrps.sub <- veriApply("FairCrps", fcst=anom.hindcast.sub.sampled, obs=anom.rean.sub.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.sub <- abind(anom.hindcast.sub.sampled, anom.rean.sub.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.sub <- apply(anom.all.sub, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.sub.sum <- apply(my.FairCrps.sub,c(2,3,4), sum) + my.Crps.clim.sub.sum <- apply(my.Crps.clim.sub,c(2,3,4), sum) + my.FairCrpss.sub <- 1-(my.FairCrps.sub.sum/my.Crps.clim.sub.sum) + + my.FairCrpssBoot[b,,,my.interv] <- my.FairCrpss.sub + rm(my.FairCrpss.sub, my.FairCrps.sub.sum, my.Crps.clim.sub.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,my.interv] <- apply(my.FairCrpssBoot[,,,my.interv], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.sub<-my.PValue.sub<-array(NA,c(n.leadtimes,n.lat,sub.size+add.last)) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:(sub.size+add.last)){ + my.EnsCorr.sub[i,j,k]<-cor(anom.hindcast.mean.sub[,i,j,k],anom.rean.sub[,i,j,k], use="complete.obs") + + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.sub[i,j,k]<-cor.test(anom.hindcast.mean.sub[,i,j,k],anom.rean.sub[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,my.interv]<-my.EnsCorr.sub + my.PValue[,,my.interv]<-my.PValue.sub + + rm(anom.hindcast.sub,anom.hindcast.mean.sub,my.EnsCorr.sub,my.PValue.sub) + } + + rm(anom.rean.sub) + gc() + #mem() + + } # next s (subarray) + + + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.sub<-array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.sub<-array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data of next startdate: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + anom.rean<-drop(anom.rean) + anom.rean.sub[my.time.interv,,]<-anom.rean[,lead,,] + + # Lead hindcast data of next startdate: + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + anom.hindcast<-drop(anom.hindcast) + anom.hindcast.sub[,my.time.interv,,]<-anom.hindcast[,,lead,,] + + rm(anom.rean,anom.hindcast) + } + + + anom.hindcast.sub.perm<-aperm(anom.hindcast.sub,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob) + gc() + + cat('Computing the Reliability Diagram. 
Please wait... ') + ens.sub<-apply(anom.hindcast.sub.perm, c(3,4), convert2prob, prob<-my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon] + obs.sub<-apply(anom.rean.sub,c(2,3), convert2prob, prob<-my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on. + + ens.sub.prob<-apply(ens.sub, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!! + obs.sub.prob<-apply(obs.sub, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # it not deserves parallelization: it only takes 10s + + if(is.null(int.lat)) int.lat <- 1:n.lat # if there is no region selected, select all the world + if(is.null(int.lon)) int.lon <- 1:n.lon + + int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile + my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.sub.prob[int1,int.lat,int.lon], obs.sub.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # 1st tercile + + int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile + my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.sub.prob[int2,int.lat,int.lon], obs.sub.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) + + int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile + my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.sub.prob[int3,int.lat,int.lon], obs.sub.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) + + cat('done\n') + #print(my.RelDiagr) # for debugging + + rm(anom.rean.sub,anom.hindcast.sub,ens.sub,obs.sub,ens.sub.prob,obs.sub.prob) + gc() + + } # next lead + + } # close if on RelDiagr + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData')) + if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData')) + } else { # bootstrapping output: + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + +} # next m (month) + + +if(slurm==FALSE){ +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### + +# run it only after computing and saving all the skill score data: + +my.months=1 #9:12 # choose the month(s) to plot and save + +for(month in my.months){ + #month=2 # for the debug + startdate.name.map<-my.month[month] # i.e:"January" + + for(my.score.name.map in my.score.name){ + #my.score.name.map='EnsCorr' # for the debug + + if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr } + if(my.score.name.map=='FairRpss') { 
load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss } + if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss } + if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr } + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss<-c(-10,seq(-1,1,by=0.1)) + col.rpss<-colorRampPalette(col)(length(brk.rpss)-1) + + my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differenciate between indexes + my.col<-col.rpss + + for(lead in 1:n.leadtimes){ + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map!="RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600) + + layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2)) + par(oma=c(1,1,4,1)) + + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F) + my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + title(my.title,line=0.5,outer=T) + + ColorBar(my.brk, cols=my.col, vert=T, cex=1.4) + + if(my.score.name.map=="EnsCorra"){ #add a small grey point if the corr.is significant: + + # arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr (lon): + my.PValue.rev<-my.PValue + my.PValue.rev[lead,,]<-my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + for (x in 1:n.lon) { + for (y in 1:n.lat) { + if (my.PValue.rev[lead,y, x] == TRUE) { + text(x = lo[x], y = la[y], ".", cex = .2) + } + } + } + } + dev.off() + + } # close if on !RelDiagr + + if(my.score.name.map=="RelDiagr") { + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600) + + my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + } + + } # next lead + + } # next score + +} # next month + + +############################################################################################### +# Composite map of the four skill scores # +############################################################################################### + +# run it only when you have all the 4 skill scores for each month: + +my.months=9:12 # choose the month(s) to plot and save + +for(month in my.months){ + + startdate.name.map<-my.month[month] # i.e:"January" + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100) + + layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1)) + #layout.show(16) + + for(score in 1:4){ + for(lead in 1:n.leadtimes){ + + img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) + par(mar = rep(0, 4), xaxs='i', yaxs='i' ) + plot.new() + rasterImage(img, 0, 0, 1, 1, interpolate=FALSE) + + } # next lead + + } # next score + + dev.off() + +} # next month + + + +# Dani Map script for the small figure: +#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R + + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate # +############################################################################################### + +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + 
clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. 
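+# A hedged sketch of that difference (the experiment id 'EnsEcmwfWeekFor' and the
+# use of sweep() are assumptions, not tested code): load the forecast startdate,
+# then subtract the climatology already computed in the hindcast block (so keep
+# clim.hindcast instead of rm()-ing it) rather than recomputing one from the forecast:
+#data.forecast <- Load(var=var.name, exp='EnsEcmwfWeekFor', obs=NULL, sdates=sdates.seq[startdate],
+#                      nleadtime=n.leadtimes, leadtimemin=1, leadtimemax=n.leadtimes, output='lonlat',
+#                      configfile=file_path, method='distance-weighted')
+#clim.hind.1yr <- clim.hindcast[1,1,1,,,]                           # hindcast climatology: [leadtime, lat, lon]
+#anom.forecast <- sweep(data.forecast$mod, c(4,5,6), clim.hind.1yr) # subtract along leadtime/lat/lon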
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=800 # number of years +M=140 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
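+# Note on the convert2prob() calls below: writing convert2prob(x, prob <- my.prob)
+# passes the value positionally AND assigns prob in the calling environment; here it
+# behaves like prob=my.prob, but the named form is safer. A toy check of the output
+# (a [N, n.categ] 0/1 membership matrix; kept commented so it does not run):
+#cnt <- convert2prob(rnorm(6), prob=c(1/3,2/3)) # 6 x 3 matrix, one column per tercile
+#rowSums(cnt)                                   # all 1: each year falls in exactly one category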
+obs<-convert2prob(anom.rean,prob<-my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob<-my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement of the hindcast
+ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # matrix used by convert2prob when ref is not specified; it is ~ equal to the ClimEns function above, it only has one column more
+ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using easyVerification's own climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from the value of Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without fcst.ref: uses easyVerification's own climatological forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
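+# A quick hedged check of the point of this toy loop: with few members the raw RPS
+# penalises the ensemble while FairRps corrects the finite-ensemble bias, so the two
+# should converge as M grows (toy sizes; kept commented so it does not run):
+#for(M.toy in c(2,4,8,50)){
+#  e <- convert2prob(array(rnorm(10000*M.toy),c(10000,M.toy)), prob=c(1/3,2/3))
+#  o <- convert2prob(rnorm(10000), prob=c(1/3,2/3))
+#  cat(M.toy, round(mean(EnsRps(e,o)),3), round(mean(FairRps(e,o)),3), "\n")
+#}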
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using easyVerification's own climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from the value of FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm, not only to swap dimensions but also to merge two dimensions into one!
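+# Minimal illustration of the aperm() trick above with toy shapes (kept commented
+# so it does not run): reorder first, then refold so two dimensions fuse into one:
+#a <- array(1:24, c(2,3,4))
+#b <- array(aperm(a, c(3,1,2)), c(4, 2*3)) # keep dim 3 first, fuse dims 1 and 2
+#dim(b)                                    # 4 6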
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
+
+# once you have, for each leadtime, all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b", # 'week', 'leadtime' and 'EnMeanCorr' are assumed to be defined by the mapping section
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+          obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+          leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") # the exp counterparts are assumed to be computed elsewhere
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the Reanalysis climatology loading only 1 file in each 4 (it's quick, but it needs all the files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate in each 4, plus the last 3 startdates, which must be computed individually because they do not form a complete set of 4
+
+clim.ERAI<-array(NA,c(52,4,n.lat,n.lon)) # (added) clim.ERAI must be initialised before the loop; dims assumed to be [startdate, leadtime, lat, lon]
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+          nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are filled in by shifting:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,]  # startdate   leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,]  #  (week)     1  2  3  4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,]  #    1        a  b  c  d   <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,]  #    2        b  c  d  e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,]  #    3        c  d  e  f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,]  #    4        d  e  f  g
+  }                                                        #    5        e  f  g  h
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+  # You can make cor much faster by stripping away all the error-checking code and calling the internal C function directly:
+  # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+  # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+  C_cor<-get("C_cor", asNamespace("stats"))
+  test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+  my.EnsCorr.sub[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.sub[,i,j,k], y=anom.rean.sub[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                               Raster Animations                                     #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                   ProgressBar                                       #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+
+
+}
+
+
+
+#######################################################################################
+#                            Load large datasets with ff                              #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # the list argument takes object names as strings
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # in the real case this array doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper around veriApplyBig(); the original signature was garbled, so the name and arguments here are assumptions:
+run.skill.big <- function(my.scratch, anom.rean){
+  ffload(file=my.scratch) # restores anom.hind from disk
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # in the real case this array doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # in the real case this array doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  month=1 # for debug
+  my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
+        yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+        if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled"))
+        if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon))
+
+        if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts again if b>1
+
+        for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff this takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m)
+
+        if(!bigdata) { rm(anom.hind.big); gc() } # free memory for the following commands
+
+        # faster way that will be able to replace the above one when the ffapply output parameters will be improved:
+        #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only
+        #aa<-abind(a[1:10], along=4) # it doesn't fit into memory!
+
+        if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                 obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                 prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss
+
+        if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                   obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                   tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss
+
+        if(bigdata) delete(anom.hind.big.sampled)
+        rm(anom.hind.big.sampled) # delete the sampled hindcast
+
+      }
+
+      # calculate the 5 and 95 percentiles of the skill scores:
+      if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+      if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- apply(my.FairCrpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) # apply over dims, as for the Rpss
+
+    } # close if on boot
+
+  }
+
+  if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts
+  rm(anom.hind.big)
+  rm(anom.rean.big)
+  gc()
+
+  if(!boot){
+    if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  } else {
+    if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+
+  if(exists("my.FairRpss")) rm(my.FairRpss); if(exists("my.FairCrpss")) rm(my.FairCrpss) # exists() instead of is.object(), which fails on missing objects
+  if(exists("my.FairRpssBoot")) rm(my.FairRpssBoot); if(exists("my.FairCrpssBoot")) rm(my.FairCrpssBoot)
+  if(exists("my.FairRpssConf")) rm(my.FairRpssConf); if(exists("my.FairCrpssConf")) rm(my.FairCrpssConf)
+
+} # next m (month)
+
+
+#########################################################################################
+#     Calculate and save the FairRpss for a chosen period using ff and Load()           #
+#########################################################################################
+
+  anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+  for(startdate in my.startdates){
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    #anom.rean<-drop(anom.rean)
+    #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+    my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+    anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+             obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+             output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+    #anom.hindcast<-drop(anom.hindcast)
+
+    # NOTE: this section is an unfinished draft: anom.rean is loaded above but never copied into anom.rean.big,
+    # and anom.hindcast is only available if the commented load() above is re-enabled.
+    anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+    rm(anom.hindcast)
+  }
+
+  my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+  save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+} # close if on Slurm
+
+
diff --git a/old/SkillScores_v3.R b/old/SkillScores_v3.R
new file mode 100644
index 0000000000000000000000000000000000000000..13b0234c38b0b372afeb493154213df5c183fdd4
--- /dev/null
+++ b/old/SkillScores_v3.R
@@ -0,0 +1,1181 @@
+
+Mare=TRUE # if Mare is TRUE, the script must be run on MareNostrum; if it is FALSE, on our workstations.
+Slurm=FALSE # set it to TRUE only if you run the script with Slurm
+
+#load libraries and functions:
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+library(jpeg)
+library(abind)
+#library(ff)
+#library(ffbase)
+if(!Mare) {source('/scratch/Earth/ncortesi/RESILIENCE/Rfunctions.R')} else {source('/gpfs/scratch/bsc32842/RESILIENCE/Rfunctions.R')}
+
+#########################################################################################
+#                                  User's settings                                      #
+#########################################################################################
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir=ifelse(!Mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/scratch/bsc32842/RESILIENCE")
+
+cfs.name='ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name='sfcWind' # forecast variable. It is the name used inside the netCDF files and as a suffix in map filenames, e.g. 'sfcWind' or 'psl'
+var.name.map='10m Wind Speed' # forecast variable to verify (name used in the map titles), e.g. '10m Wind Speed' or 'SLP'
+
+yr1=2014 # starting year of the weekly sequence of the forecasts
+mes=1 # starting forecast month (usually January)
+day=2 # starting forecast day
+
+yr1.hind=1994 #1994 # first hindcast year
+yr2.hind=2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week<-c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members<-4 # number of hindcast members
+
+my.score.name=c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRpss, FairCrpss and/or RelDiagr
+
+veri.month=1 #1:12 # select the month(s) for which to compute the chosen skill scores
+
+my.pvalue=0.05 # when computing EnsCorr, the p-value level to show in the ensemble mean correlation maps
+my.prob=c(1/3,2/3) # quantiles used for the FairRpss and the RelDiagr (usually the terciles)
+conf.level=c(0.05, 0.95) # percentiles employed for the calculation of the confidence level of the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot=TRUE # when computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot=20 # number of resamples considered in the bootstrapping
+
+n.cpus=8 # number of CPUs used by parApply for the calculation of skill scores (our desktop has 4 CPUs but 8 virtual ones, so it's better to specify 8)
+         # for parallelizing the apply function for the EnsCorr computation, no cores are selected because the input array is big (~500 GB)
+         # when this script is run with Slurm (in amdahl or in moore) with sbatch or in the interactive mode, it should use exactly 8 physical cores.
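+# (added note, not in the original script) Quick sanity check of the sizing rule
+# used for max.n.el below: one 'numeric' element takes 8 bytes, so an array of
+# 1e7 elements occupies 1e7*8/1e6 = 80 MB, while a full hindcast array of an
+# illustrative dimension c(4,100,4,320,640) would take
+# prod(c(4,100,4,320,640))*8/1e6 ~ 2621 MB, which is why the script processes
+# it in longitude chunks further below.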
+max.n.el=10000000 # maximum number of elements in an array (each element of type 'numeric' occupies 8 bytes, so max.n.el*8/1e6 is the maximum size in MB you want to use)
+                  # 10000000 is the optimal value for 8 GB machines
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+my.lat=55.3197120
+my.lon=0.5625
+
+###################################### Derived variables ##################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+col <- if(!Mare) { as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]) } else { as.character(read.csv("/gpfs/scratch/bsc32842/RESILIENCE/rgbhex.csv",header=F)[,1]) }
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+
+sdates.seq<-weekly.seq(yr1,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes<-length(leadtime.week)
+n.yrs.hind<-yr2.hind-yr1.hind+1
+my.month.short<-substr(my.month,1,3) # my.month (the vector of month names) is presumably defined in Rfunctions.R
+
+# Monthly Startdates for 2014 reforecasts: (in the future you can modify it to work for a generic year)
+startdates.monthly<-list()
+startdates.monthly[[1]]<-1:5
+startdates.monthly[[2]]<-6:9
+startdates.monthly[[3]]<-10:13
+startdates.monthly[[4]]<-14:17
+startdates.monthly[[5]]<-18:22
+startdates.monthly[[6]]<-23:26
+startdates.monthly[[7]]<-27:31
+startdates.monthly[[8]]<-32:35
+startdates.monthly[[9]]<-36:39
+startdates.monthly[[10]]<-40:44
+startdates.monthly[[11]]<-45:48
+startdates.monthly[[12]]<-49:52
+
+## extract geographic coordinates; do this only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows out to use load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#          nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+load(paste0(workdir,'/coordinates.RData'))
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon-ceiling(length(lons[lons<180 & lons > 0]))
+la<-rev(lats)
+lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+
+#########################################################################################
+#       Calculate and save up to all the chosen Skill Scores for a chosen period        #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies by running the preformatting part at the end of this script once.
+#
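+# (added illustration, assumed toy numbers) How the sub-array splitting in the
+# loop below behaves for a January-like case with hind.dim = c(4, 100, 4, 320, 640):
+#   n.sub         <- ceiling(prod(hind.dim)/max.n.el)  # ceiling(327680000/1e7) = 33
+#   sub.size      <- floor(640/33)                     # 19 longitudes per chunk
+#   n.sub         <- 640 %/% 19                        # recomputed: 33 chunks
+#   sub.size.last <- 640 %% 33                         # 13 extra longitudes appended to the last chunk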
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates<-length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+  n.sub <- prod(hind.dim)/max.n.el
+  n.sub <- ceiling(n.sub) # number of sub-arrays in which to split the hindcast and rean. data
+  n.lon <- tail(hind.dim,1) # number of longitude elements
+  sub.size <- floor(n.lon/n.sub) # number of elements in the last dimension (lon) of each subarray, rounded down to the nearest integer
+  if(sub.size<=1) stop("Subarrays too small. Increase the value of max.n.el ten times")
+  n.sub <- n.lon %/% sub.size # take only the integer part of the ratio, i.e: it is equal to floor(n.lon/sub.size)
+  sub.size.last <- n.lon %% n.sub # number of additional elements of the last subarray (if >0)
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Subarray n. ')
+
+  for(s in 1:n.sub){ # EnsCorr, FairRpss and FairCrpss calculation:
+    #s=1 # for the debug
+    cat(paste0(s,'/',n.sub,' '))
+
+    if(s==n.sub){ add.last <- sub.size.last } else { add.last <- 0 } # because the last subarray is longer than the others, if sub.size.last>0
+
+    anom.hindcast.sub<-anom.hindcast.sub.sampled<-array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, sub.size + add.last))
+    anom.rean.sub<-anom.hindcast.mean.sub<-anom.rean.sub.sampled<-array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, sub.size + add.last))
+    my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.sub[my.time.interv,,,]<-anom.rean[,,,my.interv]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.sub[,my.time.interv,,,]<-anom.hindcast[,,,,my.interv]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data
+        anom.hindcast.mean<-drop(anom.hindcast.mean)
+        anom.hindcast.mean.sub[my.time.interv,,,]<-anom.hindcast.mean[,,,my.interv]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub,
prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.sub <- veriApply("FairRps", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.sub, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + + my.Rps.clim.sub <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.sub.sum <- apply(my.FairRps.sub,c(2,3,4), sum) + my.Rps.clim.sub.sum <- apply(my.Rps.clim.sub,c(2,3,4), sum) + + my.FairRpss.sub <- 1-(my.FairRps.sub.sum/my.Rps.clim.sub.sum) + + my.FairRpss[,,my.interv] <- my.FairRpss.sub + rm(my.FairRpss.sub, ens, obs, obs2, obs3, my.FairRps.sub.sum, my.Rps.clim.sub.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.sub.sampled[,y,,,] <- anom.hindcast.sub[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.sub.sampled[y,,,] <- anom.rean.sub[yrs.sampled[y],,,] + + my.FairRps.sub <- veriApply("FairRps", fcst=anom.hindcast.sub.sampled, obs=anom.rean.sub.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.sub.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + my.Rps.clim.sub <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.sub.sum <- apply(my.FairRps.sub,c(2,3,4), sum) + my.Rps.clim.sub.sum <- apply(my.Rps.clim.sub,c(2,3,4), sum) + my.FairRpss.sub <- 1-(my.FairRps.sub.sum/my.Rps.clim.sub.sum) + + my.FairRpssBoot[b,,,my.interv]<-my.FairRpss.sub + rm(my.FairRpss.sub, ens, obs, obs2, obs3, my.FairRps.sub.sum, my.Rps.clim.sub.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,my.interv] <- apply(my.FairRpssBoot[,,,my.interv], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrps.sub <- veriApply("FairCrps", fcst=anom.hindcast.sub, obs=anom.rean.sub, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.sub <- abind(anom.hindcast.sub,anom.rean.sub, along=1) # merge exp and obs together to use apply: + my.Crps.clim.sub <- apply(anom.all.sub, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.sub.sum <- apply(my.FairCrps.sub,c(2,3,4), sum) + my.Crps.clim.sub.sum <- apply(my.Crps.clim.sub,c(2,3,4), sum) + my.FairCrpss.sub <- 1-(my.FairCrps.sub.sum/my.Crps.clim.sub.sum) + + 
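+        # (added note) Both skill scores follow the standard form SS = 1 - S/S_clim;
+        # summing the scores over all years before taking the ratio is equivalent to
+        # using mean scores, since the common 1/n factor cancels in the ratio.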
my.FairCrpss[,,my.interv]<-my.FairCrpss.sub
+        rm(my.FairCrpss.sub, my.FairCrps.sub.sum, my.Crps.clim.sub.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+          print(paste0("resampling n. ",b,"/",n.boot))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.sub.sampled[,y,,,] <- anom.hindcast.sub[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.sub.sampled[y,,,] <- anom.rean.sub[yrs.sampled[y],,,]
+
+          my.FairCrps.sub <- veriApply("FairCrps", fcst=anom.hindcast.sub.sampled, obs=anom.rean.sub.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+          anom.all.sub <- abind(anom.hindcast.sub.sampled, anom.rean.sub.sampled, along=1) # merge exp and obs together to use apply:
+          my.Crps.clim.sub <- apply(anom.all.sub, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+          my.FairCrps.sub.sum <- apply(my.FairCrps.sub,c(2,3,4), sum)
+          my.Crps.clim.sub.sum <- apply(my.Crps.clim.sub,c(2,3,4), sum)
+          my.FairCrpss.sub <- 1-(my.FairCrps.sub.sum/my.Crps.clim.sub.sum)
+
+          my.FairCrpssBoot[b,,,my.interv] <- my.FairCrpss.sub
+          rm(my.FairCrpss.sub, my.FairCrps.sub.sum, my.Crps.clim.sub.sum)
+          gc()
+
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairCrpssConf[,,,my.interv] <- apply(my.FairCrpssBoot[,,,my.interv], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+      }
+
+    }
+
+    if(any(my.score.name=="EnsCorr")){
+      # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+      my.EnsCorr.sub<-my.PValue.sub<-array(NA,c(n.leadtimes,n.lat,sub.size+add.last))
+
+      for(i in 1:n.leadtimes){
+        for(j in 1:n.lat){
+          for(k in 1:(sub.size+add.last)){
+            my.EnsCorr.sub[i,j,k]<-cor(anom.hindcast.mean.sub[,i,j,k],anom.rean.sub[,i,j,k], use="complete.obs")
+
+            # the 'use' option is chosen from: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+            # note that cor.test() silently ignores 'use', so its NA handling may differ from cor():
+            my.PValue.sub[i,j,k]<-cor.test(anom.hindcast.mean.sub[,i,j,k],anom.rean.sub[,i,j,k], use="complete.obs")$p.value
+
+          }
+        }
+      }
+
+      my.EnsCorr[,,my.interv]<-my.EnsCorr.sub
+      my.PValue[,,my.interv]<-my.PValue.sub
+
+      rm(anom.hindcast.sub,anom.hindcast.mean.sub,my.EnsCorr.sub,my.PValue.sub)
+    }
+
+    rm(anom.rean.sub)
+    gc()
+    #mem()
+
+  } # next s (subarray)
+
+
+  if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but it can be split by leadtime instead:
+    my.RelDiagr<-list()
+
+    for(lead in 1:n.leadtimes){
+      cat(paste0('leadtime n.: ',lead,'/',n.leadtimes))
+      anom.rean.sub<-array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon))
+      anom.hindcast.sub<-array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon))
+      cat(' startdate: ')
+      i=0
+
+      for(startdate in my.startdates){
+        i=i+1
+        cat(paste0(i,'/',length(my.startdates),' '))
+
+        pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+        my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+        # Load reanalysis data of next startdate:
+        load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+        anom.rean<-drop(anom.rean)
+        anom.rean.sub[my.time.interv,,]<-anom.rean[,lead,,]
+
+        # Load hindcast data of next startdate:
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+        anom.hindcast<-drop(anom.hindcast)
+        anom.hindcast.sub[,my.time.interv,,]<-anom.hindcast[,,lead,,]
+
+        rm(anom.rean,anom.hindcast)
+      }
+
+
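+      # (added note, toy dimensions) The aperm() call below moves the time
+      # dimension in front of the members, as convert2prob expects, e.g.:
+      # dim(aperm(array(0, c(4,100,320,640)), c(2,1,3,4))) # 100 4 320 640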
anom.hindcast.sub.perm<-aperm(anom.hindcast.sub,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+      gc()
+
+      cat('Computing the Reliability Diagram. Please wait... ')
+      ens.sub<-apply(anom.hindcast.sub.perm, c(3,4), convert2prob, prob<-my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+      obs.sub<-apply(anom.rean.sub,c(2,3), convert2prob, prob<-my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+
+      ens.sub.prob<-apply(ens.sub, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!!
+      obs.sub.prob<-apply(obs.sub, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # it does not need parallelization: it only takes ~10 s
+
+      if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, select the whole world
+      if(is.null(int.lon)) int.lon <- 1:n.lon
+
+      int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+      my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.sub.prob[int1,int.lat,int.lon], obs.sub.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # 1st tercile
+
+      int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+      my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.sub.prob[int2,int.lat,int.lon], obs.sub.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+      int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+      my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.sub.prob[int3,int.lat,int.lon], obs.sub.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+      cat('done\n')
+      #print(my.RelDiagr) # for debugging
+
+      rm(anom.rean.sub,anom.hindcast.sub,ens.sub,obs.sub,ens.sub.prob,obs.sub.prob)
+      gc()
+
+    } # next lead
+
+  } # close if on RelDiagr
+
+  if(!boot){
+    if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+    if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+  } else { # bootstrapping output:
+    if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+
+} # next m (month)
+
+
+if(Slurm==FALSE){
+###########################################################################################
+#            Visualize and save the score maps for one score and period                   #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr")  { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss'){ load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(-10,seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point if the corr. is significant:
+
+          # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev<-my.PValue
+          my.PValue.rev[lead,,]<-my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) { # compare the p-value with the chosen significance level
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'.
Forecast time ',leadtime.week[lead],'.') + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + } + + } # next lead + + } # next score + +} # next month + + +############################################################################################### +# Composite map of the four skill scores # +############################################################################################### + +# run it only when you have all the 4 skill scores for each month: + +my.months=9:12 # choose the month(s) to plot and save + +for(month in my.months){ + + startdate.name.map<-my.month[month] # i.e:"January" + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100) + + layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1)) + #layout.show(16) + + for(score in 1:4){ + for(lead in 1:n.leadtimes){ + + img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) + par(mar = rep(0, 4), xaxs='i', yaxs='i' ) + plot.new() + rasterImage(img, 0, 0, 1, 1, interpolate=FALSE) + + } # next lead + + } # next score + + dev.off() + +} # next month + + + +# Dani Map script for the small figure: +#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R + + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate # +############################################################################################### + +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + 
clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values along the new dimensions to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+          nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime)
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# TODO: finish this section!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
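+# (added sketch, hypothetical object and experiment names) The intended computation for
+# the forecast year would subtract the hindcast climatology from the loaded forecast,
+# e.g. something like:
+# data.forecast <- Load(var=var.name, exp='EnsEcmwfWeek', obs=NULL, sdates=paste0(yr1,'0102'), ...) # 'EnsEcmwfWeek' is an assumed forecast experiment name
+# anom.forecast <- data.forecast$mod - clim.hindcast # clim.hindcast comes from the hindcast block above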
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+          nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values along the new dimensions to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek',
+          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+          nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime)
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+
+
+
+
+
+
+##################################################################################
+#                                   Toy Model                                    #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+
+my.prob=c(1/3,2/3) # quantiles used for the FairRpss and the RelDiagr (usually the terciles)
+
+N=800 # number of years (overridden by the loops below)
+M=140 # number of members (overridden by the loops below)
+
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
+
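+# (added note) convert2prob() turns the N observations into an N x 3 matrix of
+# 0/1 tercile indicators, and the N x M ensemble into an N x 3 matrix of member
+# counts per tercile; the checks mean(obs[,1]) ~ 1/3 and mean(ens.clim[,1]) ~ M/3
+# below rely on exactly this layout.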
+obs<-convert2prob(anom.rean,prob<-my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob<-my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement from the hindcast
+ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when the option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # function used by convert2prob when ref is not specified; it is ~ equal to the ClimEns function above, it only has one more column
+ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim as N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without providing the clim. forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
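+# (added note) When fcst.ref is omitted, veriApply() builds its own climatological
+# reference ensemble from the observations (leave-one-out, as sketched with
+# anom.rean.clim above), which is why the *.noclim variants differ from the
+# versions that pass an explicit climatological forecast.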
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim as N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in the case of forecasts, or the number of years in the case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # a very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) +cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 startdates that must be computed individually because they are not a complete set of 4 + +for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years + data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + + # the intermediate startdate between startdate week i and startdate week i+4 are repeated: + if(startdate <= 49){ + clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week) + clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4 + clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdate 1 and 5 are necessary to calculate all startdates between 1 and 5 + clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e + clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f + clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g + } # 5 e f g h + + if(startdate > 1 && startdate <=49){ # fill the previous startdates: + 
clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,] + clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,] + } + +} + + + + + # You can make cor much faster by stripping away all the error checking code and calling the internal c function directly: + # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })}) + # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })}) + C_cor<-get("C_cor", asNamespace("stats")) + test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE) + my.EnsCorr.sub[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.sub[,i,j,k], y=nom.rean.sub[,i,j,k], na.method=2, FALSE) + + + +####################################################################################### +# Raster Animations # +####################################################################################### + +library(png) +library(animation) + + +clustPNG <- function(pngobj, nclust = 3){ + + j <- pngobj + # If there is an alpha component remove it... + # might be better to just adjust the mat matrix + if(dim(j)[3] > 3){ + j <- j[,,1:3] + } + k <- apply(j, c(1,2), c) + mat <- matrix(unlist(k), ncol = 3, byrow = TRUE) + grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2]) + clust <- kmeans(mat, nclust, iter.max = 20) + new <- clust$centers[clust$cluster,] + + for(i in 1:3){ + tmp <- matrix(new[,i], nrow = dim(j)[1]) + j[,,i] <- tmp + } + + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) +} + +makeAni <- function(file, n = 10){ + j <- readPNG(file) + for(i in 1:n){ + clustPNG(j, i) + mtext(paste("Number of clusters:", i)) + } + + j <- readPNG(file) + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) + mtext("True Picture") +} + +file <- "~/Dropbox/Photos/ClassyDogCropPng.png" +n <- 20 +saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5)) + + + + +####################################################################################### +# ProgressBar # +####################################################################################### + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + #cat(i,"\r") + # So you can use message() like this, thanks to Sharpie's + # comment to use appendLF=FALSE. 
+
+
+
+#######################################################################################
+#                          Load large datasets with ff                                #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # 'list' must be a character vector of object names
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # at real sizes this wouldn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# wrapper that reloads the ff data from disk before computing the skill score:
+veriApplyBig.disk <- function(my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # at real sizes this wouldn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind, file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # at real sizes this wouldn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross)) # this fails: ffsave() needs the bare object name
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
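+
+# A possible do.call workaround (a sketch of the trick referenced above: ffsave()
+# deparses the names of its '...' arguments, so pass the bare symbol instead of a get() call):
+# do.call("ffsave", list(as.name("pred_Cal_cross_ff"), file = fileoutput_cross))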
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff   #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the ff package (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  month=1 # for debug
+  my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # number of startdates already loaded, plus 1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n.
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean<-drop(anom.rean) + #anom.rean.big[my.time.interv,,,] <- anom.rean + + my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)) + anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, + output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + #anom.hindcast<-drop(anom.hindcast) + + anom.hind.big[,my.time.interv,,,] <- anom.hindcast + rm(anom.hindcast) + } + + my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + +} # close if on Slurm + + diff --git a/old/SkillScores_v4.R b/old/SkillScores_v4.R new file mode 100644 index 0000000000000000000000000000000000000000..09180293bb0809f25c8fff0e9730b1900d8c5109 --- /dev/null +++ b/old/SkillScores_v4.R @@ -0,0 +1,1211 @@ +######################################################################################### +# Sub-seasonal Skill Scores # +######################################################################################### +# you can run it from the terminal with the syntax: +# +# Rscript SkillScores_v4.R +# +# i.e: to split the data in 8 chunks and run only the chunk number 3, write: +# +# Rscript SkillScores_v4.R 8 3 +# +# if you don't specify any argument, or you run it from the R interface, it runs all the chunks in a sequential way. +# Use this last approach if the computational speed is not a problem. +# + +chunks <- 8 + + +Mare=TRUE # if MareNostrum is TRUE, the script must run on MareNostrum; if it is FALSE, the script must run on our workstations or on our clusters + +#load libraries and functions: +library(s2dverification) +library(SpecsVerification) +library(easyVerification) +library(jpeg) +library(abind) +#library(ff) +#library(ffbase) +# Load function split.array: +if(!Mare) {source('/scratch/Earth/ncortesi/RESILIENCE/Rfunctions.R')} else {source('/gpfs/scratch/bsc32842/RESILIENCE/Rfunctions.R')} + +########################################################################################## +# User's settings # +########################################################################################## + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +workdir <- ifelse(!Mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/scratch/bsc32842/RESILIENCE") + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as a suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+yr1 <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (usually January)
+day <- 2 # starting forecast day
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1 #1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude positions of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude positions of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to run a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables ##################################
+
+args <- commandArgs(TRUE) # put into args the list of the optional arguments the script was run with from the terminal
+
+if(length(args) == 0) print("Running the script in a sequential way")
+if(length(args) == 1) stop("Please specify a second argument to the script")
+if(length(args) > 2) stop("Only two arguments are required")
+
+multicore <- ifelse(length(args) == 2, TRUE, FALSE) # TRUE if we are running the script from the terminal with two arguments
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecast categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+col <- if(!Mare) { as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]) } else { as.character(read.csv("/gpfs/scratch/bsc32842/RESILIENCE/rgbhex.csv",header=F)[,1]) }
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+
+sdates.seq <- weekly.seq(yr1,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+# Monthly startdates for the 2014 reforecasts (in future this could be derived for a generic year):
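+# A generic construction could look like this (a sketch, assuming sdates.seq holds
+# 'YYYYMMDD' strings as returned by weekly.seq above):
+# startdates.monthly <- split(seq_along(sdates.seq), as.integer(substr(sdates.seq, 5, 6)))
+# For 2014 the explicit mapping is used instead: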
+startdates.monthly<-list()
+startdates.monthly[[1]]<-1:5
+startdates.monthly[[2]]<-6:9
+startdates.monthly[[3]]<-10:13
+startdates.monthly[[4]]<-14:17
+startdates.monthly[[5]]<-18:22
+startdates.monthly[[6]]<-23:26
+startdates.monthly[[7]]<-27:31
+startdates.monthly[[8]]<-32:35
+startdates.monthly[[9]]<-36:39
+startdates.monthly[[10]]<-40:44
+startdates.monthly[[11]]<-45:48
+startdates.monthly[[12]]<-49:52
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows to use the load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                      nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates for use with PlotEquiMap:
+load(paste0(workdir,'/coordinates.RData'))
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon-ceiling(length(lons[lons<180 & lons > 0]))
+la<-rev(lats)
+lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+
+#########################################################################################
+#       Calculate and save up to all the chosen Skill Scores for a chosen period       #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies by running the preformatting part at the end of this script once.
+#
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  # If multicore=FALSE, split the array in the usual way; if not, split it into a number of chunks equal to the first argument of the script:
+  if(multicore == FALSE) {chunk <- split.array(dimensions = hind.dim, along = 5)} else {chunk <- split.array(dimensions = hind.dim, along = 5, chunks=as.integer(args[1]))}
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Chunk n. ')
+
+  # if the script is run from the terminal with arguments, only the chunk given by the second argument is computed and its results are saved.
+  # If not, it loops over all chunks serially and saves the merged results (so the chunks disappear) at the end.
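+  # For intuition: the chunks simply partition the longitude indices (dimension 5 of
+  # hind.dim); an illustrative base-R equivalent for 8 chunks would be something like:
+  # split(1:n.lon, cut(1:n.lon, breaks = 8, labels = FALSE))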
+  if(multicore == FALSE) {my.chunks <- 1:chunk$n.chunk} else {my.chunks <- as.integer(args[2])}
+
+  for(c in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation:
+    #c=1 # for the debug
+    cat(paste0(c,'/', chunk$n.chunk,' '))
+
+    #if(c == chunk$n.chunk){ add.last <- chunk$chunk.size.last } else { add.last <- 0 } # because the last chunk is longer than the others, if chunk.size.last>0
+
+    anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    #my.interv<-(1 + sub.size*(c-1)):((sub.size*c) + add.last) # longitude interval where to load data
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate == my.startdates) # number of startdates already loaded, plus 1
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,,chunk$n.int[[c]]]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,,chunk$n.int[[c]]]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data
+        anom.hindcast.mean<-drop(anom.hindcast.mean)
+        anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,,chunk$n.int[[c]]]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+        # the probabilities for the climatological ensemble can be set without using convert2prob; they are the same for all points and leadtimes (33% for each tercile):
+        ens <- array(33,c(n.startdates*n.yrs.hind,3))
+
+        # the observed probabilities vary from point to point and for each leadtime:
+        obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+
+        # reshape obs to be able to apply EnsRps(ens, obs) below:
+        obs2 <- InsertDim(obs,1,3)
+        obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,]
+
+        my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+        my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+        my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+
+        my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+        my.FairRpss[,,chunk$n.int[[c]]] <- my.FairRpss.chunk
+        rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b
in 1:n.boot){ + cat(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpssBoot[b,,,chunk$n.int[[c]]]<-my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,chunk$n.int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$n.int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpss[,,chunk$n.int[[c]]]<-my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$n.int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$n.int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$n.int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$n.int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$n.int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + if(multicore == TRUE) { + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + } + + + } # next c (chunk) + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + if(multicore == FALSE) { + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + 
+          my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+          # Load reanalysis data of next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+          anom.rean <- drop(anom.rean)
+          anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+          # Load hindcast data of next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+          anom.hindcast <- drop(anom.hindcast)
+          anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+          rm(anom.rean,anom.hindcast)
+        }
+
+
+        anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years first (needed for convert2prob)
+        gc()
+
+        cat('Computing the Reliability Diagram. Please wait... ')
+        ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob=my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+        obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob=my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+        ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+        obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes 10s
+
+        if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, use the whole world
+        if(is.null(int.lon)) int.lon <- 1:n.lon
+
+        int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+        my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+        int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+        my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+        my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        cat('done\n')
+        #print(my.RelDiagr) # for debugging
+
+        rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+        gc()
+
+      } # next lead
+
+    } # close if on RelDiagr
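+    # Quick sanity check of the type=4 choice above (a sketch, assuming easyVerification's
+    # count2prob with forecasts in rows and categories in columns):
+    # cnt <- matrix(c(2,1,1, 0,3,1), nrow=2, byrow=TRUE) # member counts per tercile, 2 forecasts
+    # rowSums(count2prob(cnt, type=4)) # -> 1 1, i.e. the probabilities sum to 100%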
+    # results for all chunks can be saved only if multicore == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved):
+    if(!boot){
+      if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+      if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+    } else { # bootstrapping output:
+      if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+    }
+
+  } # close if on multicore
+
+} # next m (month)
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(multicore == FALSE){
+
+###########################################################################################
+#            Visualize and save the score maps for one score and period                  #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(-10,seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
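+        # (illustration) the double subscript below first reverses the latitudes, then
+        # rotates the longitudes so the map starts west of Greenwich; e.g. with n.lon=6
+        # and n.lonr=3: c((3+1):6, 1:3) -> 4 5 6 1 2 3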
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point if the correlation is significant:
+
+          # rearrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev<-my.PValue
+          my.PValue.rev[lead,,]<-my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) {
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                      Composite map of the four skill scores                                 #
+###############################################################################################
+
+# run it only when you have all 4 skill scores for each month:
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate   #
+###############################################################################################
+
+# Run it only
once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. 
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=800 # number of years +M=140 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
+obs<-convert2prob(anom.rean, prob=my.prob)
+#mean(obs[,1]) # check that it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind, prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample of the hindcast, without replacement
+ens.clim<-convert2prob(anom.hind.clim, prob=my.prob)
+#mean(ens.clim[,1]) # check that it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one more column
+ens.clim.rean<-convert2prob(anom.rean.clim, prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it gives the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it gives the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it gives the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using veriApply's own climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from the N -> +oo limit of Rps.clim (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # veriApply builds its own climatological reference
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using veriApply's own climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from the N -> +oo limit of FairRps.clim (0.444)
+
+cat(paste0('n.memb: ',M,' n.yrs: ',N,' : ',FairRps,' : ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' : ',FairRps,' : ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' : ',Rps,' : ',Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ',M,' n.yrs: ',N,' : ',FairRps,' : ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' : ',Rps,' : ',Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ',M,' n.yrs: ',N,' #: ',FairRps,' : ',FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                Other analysis                                  #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) +cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 startdates that must be computed individually because they are not a complete set of 4 + +for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years + data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + + # the intermediate startdate between startdate week i and startdate week i+4 are repeated: + if(startdate <= 49){ + clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week) + clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4 + clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdate 1 and 5 are necessary to calculate all startdates between 1 and 5 + clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e + clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f + clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g + } # 5 e f g h + + if(startdate > 1 && startdate <=49){ # fill the previous startdates: + 
clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                               Raster Animations                                     #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2]) # pixel grid (not used below)
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,] # replace each pixel colour with its cluster centroid
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                  ProgressBar                                        #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
+
+
+#######################################################################################
+#                            Load large datasets with ff                              #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # 'list' takes the object *names* as character strings
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # terciles, as in my.prob
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stands in for an array too big to fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper that loads the ff hindcast from disk and verifies it against obs
+# (renamed from veriApplyBig, whose original signature was broken, to avoid masking the sourced function of the same name):
+FairRpssBig <- function(my.scratch, anom.rean){
+  ffload(file=my.scratch) # restores the ff object 'anom.hind'
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stands in for an array too big to fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # the obs array is smaller, so it can stay in RAM
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
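+# A minimal, self-contained ff round trip distilled from the scratch snippets above
+# (a sketch; the path is illustrative). Note that ffsave() appends the .ffData/.RData
+# extensions itself, and ffload()'s 'list' argument takes object *names*:
+demo.path <- tempfile() # hypothetical scratch file root
+x <- ff(rnorm(1e6), dim=c(100,100,100), vmode="double", filename=paste0(demo.path,".ff"))
+ffsave(x, file=demo.path)     # writes <demo.path>.ffData + <demo.path>.RData
+close(x); rm(x)
+ffload(file=demo.path, list="x") # restores the ff object 'x'
+open(x)
+mean(x[,,1])                  # data are read from disk on demand
+close(x); rm(x)
+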
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case, but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in the RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod <- 1 else mod <- 0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    anom.rean<-drop(anom.rean) # drop it in both cases: the bootstrap branch below also needs the dropped array
+    if(!boot) anom.rean.big[my.time.interv,,,] <- anom.rean
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
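+      # (The reanalysis travels inside anom.hind.big as the extra last "member",
+      # so a single resampling of the year dimension below reorders hindcast and
+      # observations consistently.)
+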
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
+#########################################################################################
+#       Calculate and save the FairRpss for a chosen period using ff and Load()         #
+#########################################################################################
+
+  anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+  for(startdate in my.startdates){
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    #anom.rean<-drop(anom.rean)
+    #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+    my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+    anom.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', # presumably the reanalysis experiment, as in the preformatting section (the original had 'EnsEcmwfWeekHind' here)
+                      obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                      output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted' )
+    anom.rean.big[my.time.interv,,,] <- drop(anom.rean$mod) # this assignment was missing
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+    #anom.hindcast<-drop(anom.hindcast)
+
+    # sketch completion: the corresponding Load() of the hindcast was still missing here:
+    anom.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                          obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                          output = 'lonlat', configfile = file_path, method='distance-weighted' )$mod
+
+    anom.hind.big[,my.time.interv,,,] <- drop(anom.hindcast)
+    rm(anom.hindcast)
+  }
+
+  my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+  save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+} # close if on Slurm
+
+
diff --git a/old/SkillScores_v5.R b/old/SkillScores_v5.R
new file mode 100644
index 0000000000000000000000000000000000000000..fa2c1c0df635f5d4278235538bcdbdd1c3a95c26
--- /dev/null
+++ b/old/SkillScores_v5.R
@@ -0,0 +1,1274 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v5.R
+#
+# i.e: to split the data into 8 chunks and run only chunk number 3, write:
+#
+# Rscript SkillScores_v5.R 8 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks in a sequential way.
+# Use this last approach if computational speed is not a problem.
+#
+
+mare=TRUE # if mare is TRUE, the script must run on MareNostrum; if it is FALSE, the script must run on our workstations or on our clusters
+
+# load libraries and functions:
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+library(jpeg)
+library(abind)
+#library(ff)
+#library(ffbase)
+# Load function split.array:
+if(!mare) {source('/scratch/Earth/ncortesi/RESILIENCE/Rfunctions.R')} else {source('/gpfs/scratch/bsc32842/RESILIENCE/Rfunctions.R')}
+
+##########################################################################################
+#                                   User's settings                                      #
+##########################################################################################
+
+
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/scratch/bsc32842/RESILIENCE")
+
+cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as a suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+yr1 <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (usually January)
+day <- 2 # starting forecast day
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1 #1:12 # select the month(s) you want to compute the chosen skill scores for
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with which the script was run from the terminal
+
+if(length(args) == 0) print("Running the script in a sequential way")
+if(length(args) == 1) stop("Please specify a second argument to the script")
+if(length(args) > 2) stop("Only two arguments are required")
+
+multicore <- ifelse(length(args) == 2, TRUE, FALSE) # set variable multicore to TRUE if we are running the script from the terminal with two arguments
+
+if(multicore){
+  tot.chunks <- as.integer(args[1]) # total number of chunks used to split the hindcast array
+  job.chunk <- as.integer(args[2]) # number of the chunk to run (from 1 to tot.chunks)
+}
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors (the original one-line ifelse() was broken and could not return a vector anyway, so a proper if/else is used):
+col <- if(!mare) {
+  as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1])
+} else {
+  as.character(read.csv("/gpfs/scratch/bsc32842/RESILIENCE/rgbhex.csv",header=F)[,1])
+}
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+
+sdates.seq <- weekly.seq(yr1,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
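+
+# weekly.seq() comes from Rfunctions.R; a minimal sketch of what it presumably
+# returns (a weekly sequence of YYYYMMDD startdates beginning at yr1/mes/day):
+weekly.seq.sketch <- function(year, month, day){
+  first <- as.Date(sprintf("%04d-%02d-%02d", year, month, day))
+  format(seq(first, by="7 days", length.out=52), "%Y%m%d")
+}
+# With such a sequence, the hardcoded startdates.monthly list below could be
+# derived for any year, e.g.:
+# split(1:52, as.integer(substr(weekly.seq.sketch(yr1,mes,day), 5, 6)))
+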
+my.month.short <- substr(my.month,1,3)
+
+# Monthly startdates for the 2014 reforecasts (in future you can modify it to work for a generic year; see the sketch above):
+startdates.monthly<-list()
+startdates.monthly[[1]]<-1:5
+startdates.monthly[[2]]<-6:9
+startdates.monthly[[3]]<-10:13
+startdates.monthly[[4]]<-14:17
+startdates.monthly[[5]]<-18:22
+startdates.monthly[[6]]<-23:26
+startdates.monthly[[7]]<-27:31
+startdates.monthly[[8]]<-32:35
+startdates.monthly[[9]]<-36:39
+startdates.monthly[[10]]<-40:44
+startdates.monthly[[11]]<-45:48
+startdates.monthly[[12]]<-49:52
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows out to use function load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                      nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+load(paste0(workdir,'/coordinates.RData'))
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon-ceiling(length(lons[lons<180 & lons > 0]))
+la<-rev(lats)
+lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+
+#########################################################################################
+#      Calculate and save up to all the chosen Skill Scores for a selected period       #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies by running the preformatting part at the end of this script once.
+#
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  # If multicore=FALSE, split the array in the usual way; if not, split it using a number of chunks equal to the first argument of the script:
+  if(multicore == FALSE) {chunk <- split.array(dimensions = hind.dim, along = 5)} else {chunk <- split.array(dimensions = hind.dim, along = 5, chunks=tot.chunks)}
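+  # (split.array() comes from Rfunctions.R; from its usage below it presumably returns
+  # a list with, at least: n.chunk = number of chunks, int = a list with the index
+  # interval of each chunk along the split dimension, and n.int = the length of each
+  # interval; e.g. for 10 longitudes in 3 chunks: n.chunk=3, int=list(1:4,5:8,9:10),
+  # n.int=list(4,4,2).)
+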
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Chunk n. ')
+
+  # if we run the script from the terminal with an argument, it only computes the chunk we specify in the second argument and saves the results for that chunk.
+  # If not, it loops over all chunks in a serial way and saves the results (already united, so chunks disappear) at the end.
+  if(multicore == FALSE) {my.chunks <- 1:chunk$n.chunk} else {my.chunks <- job.chunk}
+
+  for(c in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation:
+    #c=1 # for the debug
+    cat(paste0(c,'/', chunk$n.chunk,' '))
+
+    #if(c == chunk$n.chunk){ add.last <- chunk$chunk.size.last } else { add.last <- 0 } # because the last chunk is longer than the others, if chunk.size.last>0
+
+    anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    #my.interv<-(1 + sub.size*(c-1)):((sub.size*c) + add.last) # longitude interval where to load data
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate == my.startdates) # count of the number of startdates already loaded +1
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,,chunk$int[[c]]]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,,chunk$int[[c]]]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data
+        anom.hindcast.mean <- drop(anom.hindcast.mean)
+        anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,,chunk$int[[c]]]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+        # the probabilities for the climatological ensemble can be set without using convert2prob, and they are the same for all points and leadtimes (33% for each tercile):
+        ens <- array(33,c(n.startdates*n.yrs.hind,3))
+
+        # the observed probabilities vary from point to point and for each leadtime:
+        obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+
+        # reshape obs to be able to apply EnsRps(ens, obs) below:
+        obs2 <- InsertDim(obs,1,3)
+        obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,]
+
+        my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+        my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+        my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+
+        my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+        my.FairRpss[,,chunk$int[[c]]] <- my.FairRpss.chunk
+        rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+ cat(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + if(multicore == TRUE) { + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + } + + + } # next c (chunk) + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + if(multicore == FALSE) { + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + 
+          my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+          # Load reanalysis data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+          anom.rean <- drop(anom.rean)
+          anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+          # Load hindcast data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+          anom.hindcast <- drop(anom.hindcast)
+          anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+          rm(anom.rean,anom.hindcast)
+        }
+
+
+        anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+        gc()
+
+        cat('Computing the Reliability Diagram. Please wait... ')
+        ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob=my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+        obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob=my.prob) # the first n.startdates*n.yrs.hind elements belong to the first tercile, and so on.
+        ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+        obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes 10s
+
+        if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, select the whole world
+        if(is.null(int.lon)) int.lon <- 1:n.lon
+
+        int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+        my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+        int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+        my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+        my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        cat('done\n')
+        #print(my.RelDiagr) # for debugging
+
+        rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+        gc()
+
+      } # next lead
+
+    } # close if on RelDiagr
+
+    # we can save the results for all chunks only if multicore == FALSE (if it is TRUE, we have the results for 1 chunk only, which has already been saved):
+    if(!boot){
+      if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+      if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+    } else { # bootstrapping output:
+      if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+    }
+
+    # If it has finished computing the last chunk, it can merge all the chunk results in one file, deleting the intermediate output files:
+    if(multicore == TRUE && job.chunk == tot.chunks){ # (job.chunk only exists in multicore runs; the body is still to be implemented)
+
+    }
+
+
+  } # close if on multicore
+
+} # next m (month)
+
+
+
+
+
+
+
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(multicore == FALSE){
+
+###########################################################################################
+#              Visualize and save the score maps for one score and period                 #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(-10,seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,' 10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees, and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point if the corr. is significant:
+
+          # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev <- my.PValue
+          my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) { # significant at the level chosen in the settings (the original compared the p-value to TRUE)
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                         Composite map of the four skill scores                              #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate   #
+###############################################################################################
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
+                        obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                        nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
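+  # (The InsertDim() chain below replicates the [leadtime,lat,lon] climatology along
+  # the dataset, member and year dimensions, so that it matches data.hindcast$mod
+  # [dataset,member,year,leadtime,lat,lon] and can be subtracted elementwise.)
+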
+  # Mean of the 4 members of the hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values as many times as needed to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]:
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime):
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values n.yrs.hind times to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_for_ACC_startdate',startdate,'.RData'))
+
+  # save into the /Data_<var.name> subdir, which is where the skill score section above loads the anomalies from (the original saved to /Data):
+  save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data_',var.name,'/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate   #
+###############################################################################################
+#
+# TODO: finish!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
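+#
+# A minimal sketch of the missing step (the forecast experiment id below is
+# hypothetical, not taken from this script):
+# data.forecast <- Load(var=var.name, exp='EnsEcmwfWeekFor', obs=NULL, sdates=<forecast startdates>, ...)
+# anom.forecast <- data.forecast$mod - clim.hindcast  # hindcast climatology, as computed below
+#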
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                        obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                        nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values as many times as needed to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]:
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime):
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values n.yrs.hind times to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data_',var.name,'/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+
+
+
+
+
+
+##################################################################################
+#                                   Toy Model                                    #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+
+my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+
+N=80 # number of years
+M=14 # number of members
+
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
+
+obs<-convert2prob(anom.rean,prob=my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample of the hindcast without replacement
+ens.clim<-convert2prob(anom.hind.clim,prob=my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # construction used by convert2prob when ref is not specified; it is ~ equal to the ClimEns function above, it only has one more column
+ens.clim.rean<-convert2prob(anom.rean.clim,prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it gives the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it gives the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it gives the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using its internal climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # it comes out at ~0.45 for every hindcast; it is different both from Rps.clim and from Rps.clim as N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without providing the clim. forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its internal climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # it comes out at ~0.55; it is different both from FairRps.clim and from FairRps.clim as N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4]  tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # a very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into a single one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
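+
+# Quick runnable check of the aperm trick above: merging the two trailing
+# dimensions into one only reshapes the array, it does not change the values:
+fcst <- array(rnorm(prod(4:8)), 4:8)                        # [4,5,6,7,8]
+fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6))  # [ens, 4, time, merged space]
+all.equal(fcst[2,3,4,5,6], fcst2[6,2,5,3+(4-1)*5])          # TRUE: column-major element mapping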
+
+# when you have for each leadtime all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+                       obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+                       leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the Reanalysis climatology loading only 1 file out of every 4 (it's quick, but it needs all the files to be available beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate out of every 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+clim.ERAI<-array(NA,c(52,4,n.lat,n.lon)) # [startdate, leadtime, lat, lon]; this initialization was missing
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate  leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week)     1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1          a b c d  <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2          b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3          c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4          d e f g
+  }                                                       # 5          e f g h
+
+  if(startdate > 1 && startdate <= 49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE) # na.method=2 corresponds to "complete.obs"
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                                 Raster Animations                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component, remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
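+# Base R (package utils) also ships a ready-made progress bar, which avoids the
+# cat()/message() tricks above:
+pb <- txtProgressBar(min = 0, max = 10, style = 3)
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  setTxtProgressBar(pb, i)
+}
+close(pb)
+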
+ message(i,"\r",appendLF=FALSE) + flush.console() +} + + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + cat(i,"\r") + + +} + + + +####################################################################################### +# Load large datasets with ff # +####################################################################################### + +library(ff) +my.scratch<-"/scratch/Earth/ncortesi/myBigData" +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind,file=my.scratch) +close(anom.hind);rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind);rm(anom.hind) + + + +BigDataPath <- "/scratch/Earth/ncortesi/myBigData" +source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R') + +anom.hind.dim<-c(5,3,1,256,512) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +save.big(array=anom.hind, path=BigDataPath) + +veriApplyBig <- function(, obsmy.scratch){ + ffload(file=my.scratch) + open(anom.hind) + my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) + close(anom.hind) +} + + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) + +my.scratch<-"/scratch/Earth/ncortesi/myBigData" + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) +close(anom.hind); rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind); rm(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind") +str(anom.hind) +dim(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData") +str(anom.hind) +dim(anom.hind) + +ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind") +#ffinfo(file="/scratch/Earth/ncortesi/anomhind") +close(anom.hind) +#delete(anom.hind) +#rm(anom.hind) + +ffload(file="/scratch/Earth/ncortesi/anomhind") + +anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory + +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +fcst<-anom.hind +obs<-anom.rean + +close(anom.hind) +rm(anom.hind) + +#ffdrop(file="/scratch/Earth/ncortesi/anomhind") + +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE) +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE) + + +pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! 
+
+
+
+#######################################################################################
+#                            Load large datasets with ff                              #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) # (assumes an in-memory array 'anom.hind' already exists)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list=c("anom.hind")) # note: 'list' takes object names, not the objects themselves
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # tercile thresholds
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper that loads the ff-backed hindcast from disk and verifies it against an
+# in-memory obs array (the original signature was garbled; the name 'verify.from.disk' and
+# its arguments are an assumption):
+verify.from.disk <- function(my.scratch, anom.rean){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list=c("anom.hind"))
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case, but not in the row below!
+                            # (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross)) # this fails: ffsave() needs object names, not values
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
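+
+# A sketch of the do.call() workaround hinted at above (assumption: ffsave(), like
+# save(), captures unevaluated object names, so passing get(...) breaks it; building
+# the call with do.call() and a symbol works instead):
+do.call("ffsave", list(as.name("pred_Cal_cross_ff"), file = fileoutput_cross))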
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
+
+
+#########################################################################################
+#       Calculate and save the FairRpss for a chosen period using ff and Load()         #
+#########################################################################################
+
+anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+for(startdate in my.startdates){
+  pos.startdate<-which(startdate==my.startdates) # number of startdates already loaded + 1
+  my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  #anom.rean<-drop(anom.rean)
+  #anom.rean.big[my.time.interv,,,] <- anom.rean  # anom.rean.big still has to be filled this way
+
+  my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+  # 'EnsEcmwfWeekHind' is the hindcast experiment, so this Load() call returns raw hindcast
+  # data (still to be converted to anomalies as in the preformatting section):
+  data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                output = 'lonlat', configfile = file_path, method='distance-weighted' )
+  anom.hindcast <- drop(data.hindcast$mod)
+
+  anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+  rm(data.hindcast, anom.hindcast)
+}
+
+my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                         Alternative way to speed up Load()                            #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a resolution different from the MSLP dataset
+var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to test for being WT-driven
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+# Forecast system list:
+S4 <- list(path = ...)
+CFSv2 <- list(path = ...)
+
+# Reanalysis list:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = ...)
+
+
+
+
+# Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+exp.source <- NULL
+obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+# Select one variable and its frequency:
+var.name <- 'sfcWind'
+store.freq <- 'daily'
+
+# Extract only the directory path of the forecast and reanalysis:
+obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+obs.source4 <- strsplit(obs.source3,'/')
+obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+obs.source6 <- list.files(path = obs.source5)
+
+# subset the files with ncks before loading them (the path placeholders must be filled
+# with the R variables above, not with the literal $...$ tokens):
+system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/', store.freq, '_mean/', var.name, '_f6h'))
+system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+y <- 1990
+var <- Load(var = var.name, exp = exp.source, obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+# (exp.source is NULL here; wrap a non-NULL source in list(), i.e. exp = list(exp.source))
+
+
+} # close if on multicore
+
+
diff --git a/old/SkillScores_v5_fake.R b/old/SkillScores_v5_fake.R
new file mode 100644
index 0000000000000000000000000000000000000000..c3d887a63c09543247255133465c1edcf144a54e
--- /dev/null
+++ b/old/SkillScores_v5_fake.R
@@ -0,0 +1,1105 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v4.R
+#
+# i.e: to split the data in 8 chunks and run only chunk number 3, write:
+#
+# Rscript SkillScores_v4.R 8 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach if computational speed is not a problem.
+#
+
+#load libraries and functions:
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+library(jpeg)
+library(abind)
+#library(ff)
+#library(ffbase)
+
+Mare=TRUE # TRUE to run on MareNostrum, FALSE to run on our workstations or clusters (it must be set before sourcing Rfunctions.R below)
+
+# Load function split.array:
+if(!Mare) {source('/scratch/Earth/ncortesi/RESILIENCE/Rfunctions.R')} else {source('/gpfs/scratch/bsc32842/RESILIENCE/Rfunctions.R')}
+
+##########################################################################################
+#                                    User's settings                                     #
+##########################################################################################
+
+# (Mare is defined above, before sourcing Rfunctions.R)
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- ifelse(!Mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/scratch/bsc32842/RESILIENCE")
+
+cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name <- 'sfcWind' # forecast variable; it is the name used inside the netCDF files and as a suffix in the map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+yr1 <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (usually january)
+day <- 2 # starting forecast day
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1 #1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for the FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level of the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+# Create an object with the info necessary to split the hindcast array.
+# The first argument ('dimensions') is a vector with the number of elements of each dimension of the hindcast array;
+# the second argument ('along') sets the dimension to split.
+
+info <- split.array(dimensions = c(4,80,4,256,512), along = 5)
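+
+# A sketch of the chunking idea behind split.array() (assumption: the real helper lives in
+# Rfunctions.R; 'n.chunks', 'bounds' and 'info.int' below are hypothetical names used only
+# for illustration):
+# n.chunks   <- 8
+# bounds     <- floor(seq(0, 512, length.out = n.chunks + 1))
+# info.int   <- lapply(1:n.chunks, function(i) (bounds[i]+1):bounds[i+1]) # longitude indices of each chunk, e.g. info.int[[3]] is 129:192
+# info.n.int <- sapply(info.int, length)                                  # chunk sizes, as in info$n.int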
+# Number of the chunk processed. It is taken from the argument of this script when running from the terminal,
+# i.e: to run chunk number 7, type in the terminal the command 'Rscript SkillScores.R 7'
+
+chunk <- as.integer(commandArgs(TRUE)[1])
+
+# NOTE: in this stripped-down version, 'month' (and the n.leadtimes/n.lat/n.lon/n.cpus objects
+# used below) must already be defined; in the full script they come from a loop over veri.month.
+my.startdates <- startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+n.startdates <- length(my.startdates) # number of startdates in the selected month
+n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+
+print(paste0("Computing Skill Scores for ", startdate.name))
+
+my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat, n.lon))
+if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, info$n.int[[chunk]]))
+anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, info$n.int[[chunk]]))
+
+for(startdate in my.startdates){
+  pos.startdate<-which(startdate == my.startdates) # number of startdates already loaded + 1
+  my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+  # Load reanalysis data:
+  load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  anom.rean <- drop(anom.rean)
+  anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,,info$int[[chunk]]]
+
+  # Load hindcast data:
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+    load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+    anom.hindcast <- drop(anom.hindcast)
+    anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,, info$int[[chunk]]]
+    rm(anom.hindcast, anom.rean)
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss")){
+    if(!boot){
+      #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+      my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+      # the probabilities of the climatological ensemble can be set without using convert2prob,
+      # because they are the same for all points and leadtimes (a fixed count of 33 'members' per tercile, i.e. 1/3 each):
+      ens <- array(33,c(n.startdates*n.yrs.hind,3))
+
+      # the observed probabilities vary from point to point and for each leadtime:
+      obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob=my.prob)})
+
+      # reshape obs to be able to apply EnsRps(ens, obs) below:
+      obs2 <- InsertDim(obs,1,3)
+      obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+      obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+      obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,]
+
+      my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+      my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+      my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+
+      # RPSS = 1 - sum(RPS_forecast) / sum(RPS_climatology):
+      my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+      my.FairRpss[,,info$int[[chunk]]] <- my.FairRpss.chunk
+      rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+      gc()
+
+    } else { # bootstrapping:
+
+      for(b in 1:n.boot){
+        cat(paste0("resampling n. ",b,"/",n.boot))
+        yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+        for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+        for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        ens <- array(33,c(n.startdates*n.yrs.hind,3))
+        obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob=my.prob)})
+        obs2 <- InsertDim(obs,1,3)
+        obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,]
+
+        my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+        my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+        my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+        my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+        my.FairRpssBoot[b,,,info$int[[chunk]]] <- my.FairRpss.chunk
+        rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+        gc()
+      }
+
+      cat('\n')
+
+      # calculate the percentiles of the skill score:
+      my.FairRpssConf[,,, info$int[[chunk]]] <- apply(my.FairRpssBoot[,,, info$int[[chunk]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+    }
+  }
+
+  if(any(my.score.name=="FairCrpss")){
+    if(!boot){
+      my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+      anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+      my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+      my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+      my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+      my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+      my.FairCrpss[,,info$int[[chunk]]]<-my.FairCrpss.chunk
+      rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+      gc()
+
+    } else { # bootstrapping:
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + + + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',chunk,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',chunk'.RData')) + +} # close the for on my.startdates + + + + + + + + + + + + + + + + + + + + + +# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below: +if(multicore == FALSE){ + +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### + +# run it only after computing and saving all the skill score data: + +my.months=1 #9:12 # choose the month(s) to plot and save + +for(month in my.months){ + #month=2 # for the debug + startdate.name.map<-my.month[month] # i.e:"January" + + for(my.score.name.map in my.score.name){ + #my.score.name.map='EnsCorr' # for the debug + + if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr } + if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); 
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(-10,seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-1) # (the palette vector was undefined here; the old red-white-darkgreen ramp is assumed)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indices
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point if the correlation is significant:
+
+          # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev<-my.PValue
+          my.PValue.rev[lead,,]<-my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) { # (the original compared to TRUE; my.PValue holds p-values, so test against my.pvalue)
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                         Composite map of the four skill scores                              #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate    #
+###############################################################################################
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
+                obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # replicate the climatology along the year and member dimensions to compute the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]:
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+                obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime):
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # replicate the climatology along the year dimension to compute the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# TODO: finish!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
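+#
+# A sketch of that intended difference (assumption: 'data.forecast' would be loaded from the
+# forecast experiment, while clim.hindcast comes from the hindcast block above):
+# anom.forecast <- data.forecast$mod - clim.hindcast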
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # replicate the climatology along the year and member dimensions to compute the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]:
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime):
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # replicate the climatology along the year dimension to compute the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+
+
+
+##################################################################################
+#                                   Toy Model                                    #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+
+my.prob=c(1/3,2/3) # quantiles used for the FairRPSS and the RelDiagr (usually they are the terciles)
+
+N=80 # number of years (default; overridden by the loop below)
+M=14 # number of members (default; overridden by the loop below)
+
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
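+
+# convert2prob() (SpecsVerification) turns continuous values into tercile counts: for a
+# length-N vector it returns an N x 3 matrix of 0/1 category indicators, and for an N x M
+# ensemble an N x 3 matrix of member counts (a sketch of a quick check, kept commented out
+# so it doesn't run inside the loops):
+# str(convert2prob(rnorm(10), prob = c(1/3, 2/3)))                         # 10 x 3
+# rowSums(convert2prob(array(rnorm(10*4), c(10,4)), prob = c(1/3, 2/3)))   # all equal to 4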
+
+obs<-convert2prob(anom.rean,prob=my.prob)
+#mean(obs[,1]) # check that it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample of the hindcast, without replacement
+ens.clim<-convert2prob(anom.hind.clim,prob=my.prob)
+#mean(ens.clim[,1]) # check that it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on the observations instead (as applied by veriApply via convert2prob when the option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one extra column
+ens.clim.rean<-convert2prob(anom.rean.clim,prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the probabilities for each year don't sum to 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the probabilities for each year don't sum to 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.probs=c(0.05, 0.85), plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using its internal climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # it comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without providing the clim. forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
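+
+# (reminder: the 'Fair' scores in SpecsVerification correct the finite-ensemble bias, so for
+# large M FairRps approaches EnsRps and the two skill scores converge; a sketch of a quick
+# check, commented out so it doesn't run inside the loops:)
+# M.big <- 1000; ens.big <- convert2prob(array(rnorm(N*M.big), c(N,M.big)), prob = my.prob)
+# c(mean(EnsRps(ens.big, obs)), mean(FairRps(ens.big, obs)))  # nearly identical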
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its internal climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # it comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                Other analysis                                  #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm, not only to swap dimensions but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
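+
+# a self-contained illustration of the aperm() trick on toy dimensions:
+# a <- array(1:24, c(2,3,4))
+# b <- array(aperm(a, c(3,1,2)), c(4, 2*3))  # move dim 3 first, then merge dims 1 and 2
+# dim(b)  # 4 6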
+
+# when you have for each leadtime all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of the point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+                obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+                leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+# (the my.points.exp.* objects are computed analogously from the experiment data):
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the Reanalysis climatology loading only 1 file every 4 (it's quick, but it needs all the files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate every 4, plus the last 3 startdates, which must be computed individually because they don't form a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+              obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+              nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are filled by repetition:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate   leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week)      1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] #   1         a b c d  <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] #   2         b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] #   3         c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] #   4         d e f g
+  }                                                       #   5         e f g h
+
+  if(startdate > 1 && startdate <= 49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+  # You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+  # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+  # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+  C_cor<-get("C_cor", asNamespace("stats"))
+  test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+  my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                                 Raster Animations                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component, remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE:
+ message(i,"\r",appendLF=FALSE) + flush.console() +} + + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + cat(i,"\r") + + +} + + + +####################################################################################### +# Load large datasets with ff # +####################################################################################### + +library(ff) +my.scratch<-"/scratch/Earth/ncortesi/myBigData" +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind,file=my.scratch) +close(anom.hind);rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind);rm(anom.hind) + + + +BigDataPath <- "/scratch/Earth/ncortesi/myBigData" +source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R') + +anom.hind.dim<-c(5,3,1,256,512) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +save.big(array=anom.hind, path=BigDataPath) + +veriApplyBig <- function(, obsmy.scratch){ + ffload(file=my.scratch) + open(anom.hind) + my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) + close(anom.hind) +} + + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) + +my.scratch<-"/scratch/Earth/ncortesi/myBigData" + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) +close(anom.hind); rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind); rm(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind") +str(anom.hind) +dim(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData") +str(anom.hind) +dim(anom.hind) + +ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind") +#ffinfo(file="/scratch/Earth/ncortesi/anomhind") +close(anom.hind) +#delete(anom.hind) +#rm(anom.hind) + +ffload(file="/scratch/Earth/ncortesi/anomhind") + +anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory + +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +fcst<-anom.hind +obs<-anom.rean + +close(anom.hind) +rm(anom.hind) + +#ffdrop(file="/scratch/Earth/ncortesi/anomhind") + +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE) +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE) + + +pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! 
+                            # (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross)) # this fails: ffsave() needs object names, not values
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#   Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff   #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for debug
+  my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata)  anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # number of startdates already loaded + 1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot)  anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss"))  my.FairRpss  <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                       prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean<-drop(anom.rean) + #anom.rean.big[my.time.interv,,,] <- anom.rean + + my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)) + anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, + output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + #anom.hindcast<-drop(anom.hindcast) + + anom.hind.big[,my.time.interv,,,] <- anom.hindcast + rm(anom.hindcast) + } + + my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + + +######################################################################################### +# Alternative way to speed up Load() # +######################################################################################### + +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset + var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to find if it is WTs-driven + var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + + # Forecast system list: + S4 <- list(path = ...) + CFSv2 <- list(path = ...) + + # Reanalysis list: + ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + JRA55 <- list(path = ...) 
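+
+ # A minimal sketch of how such a path template is expanded (hypothetical helper,
+ # mirroring the substitutions that Load() performs internally and the gsub() calls below):
+ expand.template <- function(template, var.name, store.freq, year, month){
+   out <- gsub("\\$STORE_FREQ\\$", store.freq, template)
+   out <- gsub("\\$VAR_NAME\\$", var.name, out)
+   out <- gsub("\\$YEAR\\$", year, out)
+   gsub("\\$MONTH\\$", month, out)
+ }
+ #expand.template(ERAint$path, 'sfcWind', 'daily', '1990', '01')
+ # -> "/esnas/reconstructions/ecmwf/eraint/daily_mean/sfcWind_f6h/sfcWind_199001.nc"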
+
+
+
+
+ # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+ exp.source <- NULL
+ obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+ # Select one variable and its frequency:
+ var.name <- 'sfcWind'
+ store.freq <- 'daily'
+
+ # Extract only the directory path of the forecast and reanalysis:
+ obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+ obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+ obs.source4 <- strsplit(obs.source3,'/')
+ obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+ obs.source6 <- list.files(path = obs.source5)
+
+ system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+ system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+ y <- 1990
+ var <- Load(var = var.name, exp = if(is.null(exp.source)) NULL else list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+} # close if on multicore
+
+
diff --git a/old/SkillScores_v6.R b/old/SkillScores_v6.R
new file mode 100644
index 0000000000000000000000000000000000000000..28f4ce73abc600d05c35a556c49c5d465e80495c
--- /dev/null
+++ b/old/SkillScores_v6.R
@@ -0,0 +1,1299 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v6.R
+#
+# to run all the chunks sequentially, or with one argument to run a single chunk;
+# i.e: to run only the chunk number 3, write:
+#
+# Rscript SkillScores_v6.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks in a sequential way.
+# Use this last approach if the computational speed is not a problem.
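+#
+# To run several chunks in parallel (one R process per chunk) you can launch, from bash:
+#
+# for c in {1..8}; do Rscript SkillScores_v6.R $c & done
+#
+# assuming 8 chunks and at least 8 free processors.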
+# + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) print("Running the script in a sequential way") +if(length(args) > 1) stop("Only one argument is required") + +mare <- ifelse(length(args) == 1 ,TRUE,FALSE) # set variable mare to TRUE if we are running the script in MareNostrum, FALSE otherwise + +if(mare) chunk <- as.integer(args[1]) # number of the chunk to run in this script + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(s2dverification) +library(SpecsVerification) +library(easyVerification) +library(abind) +#library(ff) +#library(ffbase) +# Load function split.array: +if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +########################################################################################## +# User's settings # +########################################################################################## + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/projects/bsc32/bsc32842/results") + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (usually january)
+day <- 2 # starting forecast day
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+yr1.hind <- 1994 # first hindcast year
+yr2.hind <- 2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1 #1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors (an if/else is needed here: ifelse() would return only the first color):
+if(!mare) {
+  col <- as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1])
+} else {
+  col <- as.character(read.csv("/gpfs/projects/bsc32/bsc32842/scripts/rgbhex.csv",header=F)[,1])
+}
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+# Monthly startdates for 2014 reforecasts: (in future you can modify it to work for a generic year)
+startdates.monthly<-list()
+startdates.monthly[[1]]<-1:5
+startdates.monthly[[2]]<-6:9
+startdates.monthly[[3]]<-10:13
+startdates.monthly[[4]]<-14:17
+startdates.monthly[[5]]<-18:22
+startdates.monthly[[6]]<-23:26
+startdates.monthly[[7]]<-27:31
+startdates.monthly[[8]]<-32:35
+startdates.monthly[[9]]<-36:39
+startdates.monthly[[10]]<-40:44
+startdates.monthly[[11]]<-45:48
+startdates.monthly[[12]]<-49:52
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in coordinates.RData and comment these rows to use function load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                      nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+load(paste0(workdir,'/coordinates.RData'))
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+if(mare) n.lat <- 1 # on MareNostrum each chunk spans a single latitude
+
+#########################################################################################
+#       Calculate and save up to all the chosen Skill Scores for a selected period      #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies by running once the preformatting part at the end of this script.
+#
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  # If mare == FALSE, split the array in our workstation considering the optimal size of a chunk; if mare == TRUE, split the array on MN by latitude with chunk size = 1 value
+  if(mare == FALSE) chunk <- split.array(dimensions = hind.dim, along = 5)
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Chunk n. ')
+
+  # if we run the script from the terminal with an argument, it only computes the chunk specified by that argument and saves the results for that chunk.
+  # If not, it loops over all chunks in a serial way and saves the results (already united, so chunks disappear) at the end.
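+  # Structure of the chunk object (inferred from how split.array(), defined in
+  # Rfunctions.R, is used below): chunk$n.chunk is the number of chunks,
+  # chunk$int[[c]] the vector of longitude positions belonging to chunk c, and
+  # chunk$n.int[[c]] their number.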
+  if(mare == FALSE) {my.chunks <- 1:chunk$n.chunk; n.chunks <- chunk$n.chunk} else {my.chunks <- chunk; n.chunks <- chunk}
+
+  for(c in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation:
+    #s=1 # for the debug
+    cat(paste0(c,'/', n.chunks,' '))
+
+    #if(s == chunk$n.chunk){ add.last <- chunk$chunk.size.last } else { add.last <- 0 } # because the last chunk is longer than the others, if chunk.size.last>0
+
+    anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+
+# MOVE EVERYTHING TO MARENOSTRUM!!!!
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+# load 1 file once to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1)
+lat <- coord$lat
+lon <- coord$lon
+save(lat, lon, file=paste0(workdir,'/coordinates.RData'))
+load(paste0(workdir,'/coordinates.RData'))
+
+
+system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = lat[5]))
+
+#ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+#system.time(a<-Load(var = 'z500', exp = NULL, obs = list(ERAint), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 360, output = 'lonlat', nprocs=1, lonmin = domain$lon[5], lonmax = domain$lon[5]))
+
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate == my.startdates) # count of the number of startdates already loaded +1
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,,chunk$int[[c]]]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,,chunk$int[[c]]]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data
+        anom.hindcast.mean <- drop(anom.hindcast.mean)
+        anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,,chunk$int[[c]]]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+        # the prob. for the
climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpss[,,chunk$int[[c]]] <- my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,] + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } else { # 
bootstrapping:
+
+        for(b in 1:n.boot){
+          print(paste0("resampling n. ",b,"/",n.boot))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+          my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+          anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply:
+          my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+          my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+          my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+          my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+          my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk
+          rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+          gc()
+
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+      }
+
+    }
+
+    if(any(my.score.name=="EnsCorr")){
+      # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+      my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]]))
+
+      for(i in 1:n.leadtimes){
+        for(j in 1:n.lat){
+          for(k in 1:chunk$n.int[[c]]){
+            my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")
+            # option 'use' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+
+            my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value
+
+          }
+        }
+      }
+
+      my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk
+      my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk
+
+      rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk)
+    }
+
+    rm(anom.rean.chunk)
+    gc()
+    #mem()
+
+    if(mare == TRUE) {
+      if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c,'.RData'))
+      if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c,'.RData'))
+    }
+
+
+  } # next c (chunk)
+
+  # the Reliability diagram cannot be computed splitting the data in chunks, so it is computed only if the script is run from the R interface or with no arguments:
+  if(mare == FALSE) {
+    if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split by leadtime instead:
+      my.RelDiagr<-list()
+
+      for(lead in 1:n.leadtimes){
+        cat(paste0('leadtime n.: ',lead,'/',n.leadtimes))
+        anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon))
+        anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon))
+        cat(' startdate: ')
+        i=0
+
+        for(startdate in my.startdates){
+          i=i+1
+          cat(paste0(i,'/',length(my.startdates),' '))
+
+          pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data of next startdate: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,] + + # Lead hindcast data of next startdate: + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,] + + rm(anom.rean,anom.hindcast) + } + + + anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob) + gc() + + cat('Computing the Reliability Diagram. Please wait... ') + ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob<-my.prob) # its dimens. are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon] + obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob<-my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on. + ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !! + obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) #no parallelization here: it only takes 10s + + if(is.null(int.lat)) int.lat <- 1:n.lat # if there is no region selected, select all the world + if(is.null(int.lon)) int.lon <- 1:n.lon + + int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile + my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile + my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) + + int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile + my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) + + cat('done\n') + #print(my.RelDiagr) # for debugging + + rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob) + gc() + + } # next lead + + } # close if on RelDiagr + + # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, that have already been saved + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData')) + if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData')) + } else { # bootstrapping output: + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, 
file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+    }
+
+    # If it has finished computing the last chunk, it can load all results in one file, deleting the intermediate output files:
+    # (still to be implemented; job.chunk and tot.chunks are not defined yet)
+    #if(job.chunk == tot.chunks){
+    #
+    #}
+
+
+  } # close if on mare
+
+} # next m (month)
+
+
+
+
+
+
+
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(mare == FALSE){
+
+###########################################################################################
+#              Visualize and save the score maps for one score and period                 #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(-10,seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'.
Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point where the corr. is significant:
+
+          # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev <- my.PValue
+          my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) { # significant at the my.pvalue level
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'.
Forecast time ',leadtime.week[lead],'.') + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + } + + } # next lead + + } # next score + +} # next month + + +############################################################################################### +# Composite map of the four skill scores # +############################################################################################### + +# run it only when you have all the 4 skill scores for each month: + +my.months=9:12 # choose the month(s) to plot and save + +for(month in my.months){ + + startdate.name.map<-my.month[month] # i.e:"January" + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100) + + layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1)) + #layout.show(16) + + for(score in 1:4){ + for(lead in 1:n.leadtimes){ + + img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) + par(mar = rep(0, 4), xaxs='i', yaxs='i' ) + plot.new() + rasterImage(img, 0, 0, 1, 1, interpolate=FALSE) + + } # next lead + + } # next score + + dev.off() + +} # next month + + + +# Dani Map script for the small figure: +#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R + + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate # +############################################################################################### + +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + 
clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. 
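+# A minimal sketch of the missing step (data.forecast is a hypothetical object holding
+# the forecast loaded with Load(); clim.hindcast is the hindcast climatology computed
+# as in the block above):
+#
+# anom.forecast <- data.forecast$mod - clim.hindcast # forecast anomalies relative to the HINDCAST climatology
+# save(anom.forecast, file=paste0(workdir,'/Data/anom_forecast_startdate',startdate,'.RData'))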
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
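+# convert2prob (easyVerification) turns the continuous values into per-category counts:
+# for the observations it returns an N x 3 matrix of 0/1 flags (the tercile observed in
+# each year), and for an M-member ensemble an N x 3 matrix of member counts per tercile;
+# hence the checks below: mean(obs[,1]) should be ~1/3 and mean(ens.clim[,1]) ~ M/3.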
+obs<-convert2prob(anom.rean,prob<-my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob<-my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement of the hindcast
+ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # function used by convert2prob when ref is not specified; it is ~ equal to the above function ClimEns, it only has one column more
+ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using its internal climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # it comes out at ~0.45 for every hindcast; it is different both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # using its internal climatological forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
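+# Note on the 'Fair' scores: FairRps/FairCrps correct the standard Rps/Crps for the
+# finite ensemble size, estimating the score that the ensemble would obtain with
+# infinitely many members; the correction vanishes as M grows, so FairRpss and Rpss
+# converge for large M.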
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its internal climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # it comes out at ~0.55; it is different both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                               Other analysis                                   #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) +cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 startdates that must be computed individually because they are not a complete set of 4 + +for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years + data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + + # the intermediate startdate between startdate week i and startdate week i+4 are repeated: + if(startdate <= 49){ + clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week) + clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4 + clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdate 1 and 5 are necessary to calculate all startdates between 1 and 5 + clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e + clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f + clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g + } # 5 e f g h + + if(startdate > 1 && startdate <=49){ # fill the previous startdates: + 
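+  # each weekly climatology is shared between neighbouring startdates: startdate s at
+  # leadtime w covers the same calendar week as startdate s-1 at leadtime w+1 (see the
+  # a/b/c/d scheme above), so the values just loaded for this startdate can also
+  # back-fill the corresponding leadtimes of the previous three startdates: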
clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,] + clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,] + } + +} + + + + + # You can make cor much faster by stripping away all the error checking code and calling the internal c function directly: + # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })}) + # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })}) + C_cor<-get("C_cor", asNamespace("stats")) + test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE) + my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=nom.rean.chunk[,i,j,k], na.method=2, FALSE) + + + +####################################################################################### +# Raster Animations # +####################################################################################### + +library(png) +library(animation) + + +clustPNG <- function(pngobj, nclust = 3){ + + j <- pngobj + # If there is an alpha component remove it... + # might be better to just adjust the mat matrix + if(dim(j)[3] > 3){ + j <- j[,,1:3] + } + k <- apply(j, c(1,2), c) + mat <- matrix(unlist(k), ncol = 3, byrow = TRUE) + grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2]) + clust <- kmeans(mat, nclust, iter.max = 20) + new <- clust$centers[clust$cluster,] + + for(i in 1:3){ + tmp <- matrix(new[,i], nrow = dim(j)[1]) + j[,,i] <- tmp + } + + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) +} + +makeAni <- function(file, n = 10){ + j <- readPNG(file) + for(i in 1:n){ + clustPNG(j, i) + mtext(paste("Number of clusters:", i)) + } + + j <- readPNG(file) + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) + mtext("True Picture") +} + +file <- "~/Dropbox/Photos/ClassyDogCropPng.png" +n <- 20 +saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5)) + + + + +####################################################################################### +# ProgressBar # +####################################################################################### + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + #cat(i,"\r") + # So you can use message() like this, thanks to Sharpie's + # comment to use appendLF=FALSE. 
+ message(i,"\r",appendLF=FALSE)
+ flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
+
+#######################################################################################
+#                          Load large datasets with ff                               #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # the list argument of ffload() takes the object names as character strings
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # terciles, as in my.prob
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # pretend it doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper that loads the ff data before computing the score (it cannot be
+# called veriApplyBig itself, or it would mask the function it calls):
+FairRpssBig <- function(my.scratch, anom.rean){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # pretend it doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # small enough to stay in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below!
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
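+
+  # Toy illustration (not executed) of why the reanalysis is appended as an extra "member"
+  # above when bootstrapping: resampling the years once on the merged array keeps the hindcast
+  # and the observations paired under the same resampling. All dimensions here are made up:
+  if(FALSE){
+    library(abind)
+    hind <- array(rnorm(4*10), c(4,10))    # [member, year]
+    rean <- array(rnorm(10), c(1,10))      # [1, year]
+    all  <- abind(hind, rean, along=1)     # [member+1, year]
+    yrs  <- sample(1:10, replace=TRUE)
+    all.sampled  <- all[ , yrs]            # one resampling for both datasets
+    fcst.sampled <- all.sampled[1:4, ]
+    obs.sampled  <- all.sampled[5, ]
+  }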
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
+        yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+        if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled"))
+        if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon))
+
+        if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts again if b>1
+
+        for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff it takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m)
+
+        if(!bigdata) rm(anom.hind.big) # free memory for the following commands
+        gc()
+
+        # faster way that will be able to replace the above one when the ffapply output parameters are improved:
+        #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only
+        #aa<-abind(a[1:10], along=4) # it doesn't fit into memory!
+
+        if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                 obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                 prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss
+
+        if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                   obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                   tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss
+
+        if(bigdata) delete(anom.hind.big.sampled)
+        rm(anom.hind.big.sampled) # delete the sampled hindcast
+
+      }
+
+      # calculate the 5 and 95 percentiles of the skill scores:
+      if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+      if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- apply(my.FairCrpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+
+    } # close if on boot
+
+  }
+
+  if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts
+  if(exists("anom.hind.big")) rm(anom.hind.big)
+  if(exists("anom.rean.big")) rm(anom.rean.big)
+  gc()
+
+  if(!boot){
+    if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  } else {
+    if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+  if(exists("my.FairRpss")) rm(my.FairRpss); if(exists("my.FairCrpss")) rm(my.FairCrpss)
+  if(exists("my.FairRpssBoot")) rm(my.FairRpssBoot); if(exists("my.FairCrpssBoot")) rm(my.FairCrpssBoot)
+  if(exists("my.FairRpssConf")) rm(my.FairRpssConf); if(exists("my.FairCrpssConf")) rm(my.FairCrpssConf)
+
+} # next m (month)
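+
+# Quick sanity check (not executed) for the percentile confidence intervals computed above:
+# quantile(..., type=8) applied over the bootstrap dimension of a toy array. All names below
+# are illustrative only:
+if(FALSE){
+  toy.boot <- array(rnorm(20*4), c(20,4))                        # [n.boot, n.leadtimes]
+  toy.conf <- apply(toy.boot, 2, quantile, probs=c(0.05,0.95), type=8)
+  dim(toy.conf)                                                  # 2 x 4: lower/upper bound per leadtime
+}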
+
+
+#########################################################################################
+#      Calculate and save the FairRpss for a chosen period using ff and Load()          #
+#########################################################################################
+
+ anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+ anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+ for(startdate in my.startdates){
+   pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+   my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+   #anom.rean<-drop(anom.rean)
+   #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+   my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+   anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                 obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                 output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+   #anom.hindcast<-drop(anom.hindcast)
+
+   anom.hind.big[,my.time.interv,,,] <- anom.hindcast # note: this sketch still assumes anom.hindcast from the commented load() above
+   rm(anom.hindcast)
+ }
+
+ my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+ save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                        Alternative way to speed up Load()                             #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+ var.name='sfcWind' #'tas' #'prlr' # any daily variable for which we want to find out whether it is WT-driven
+ var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+ # Forecast system list:
+ S4 <- list(path = ...)
+ CFSv2 <- list(path = ...)
+
+ # Reanalysis list:
+ ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ JRA55 <- list(path = ...)
+
+
+
+
+ # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+ exp.source <- NULL
+ obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+ # Select one variable and its frequency:
+ var.name <- 'sfcWind'
+ store.freq <- 'daily'
+
+ # Extract only the directory path of the forecast and reanalysis:
+ obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+ obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+ obs.source4 <- strsplit(obs.source3,'/')
+ obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+ obs.source6 <- list.files(path = obs.source5)
+
+ system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+ system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+ y <- 1990
+ var <- Load(var = var.name, exp = exp.source, obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+} # close if on mare
+
+
diff --git a/old/SkillScores_v6.R~ b/old/SkillScores_v6.R~
new file mode 100644
index 0000000000000000000000000000000000000000..32979fdbc52d180c69c60b14db937cf7cbe6da5e
--- /dev/null
+++ b/old/SkillScores_v6.R~
@@ -0,0 +1,1299 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v4.R
+#
+# i.e: to run only the chunk number 3 of the chunks the data is split into, write:
+#
+# Rscript SkillScores_v4.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks in a sequential way.
+# Use this last approach if the computational speed is not a problem.
+#
+
+##########################################################################################
+#                                 MareNostrum settings                                   #
+##########################################################################################
+
+args <- commandArgs(TRUE) # put into variable args the list of all the optional arguments with which the script was run from the terminal
+
+if(length(args) == 0) print("Running the script in a sequential way")
+if(length(args) > 1) stop("Only one argument is required")
+
+mare <- (length(args) == 1) # set variable mare to TRUE if we are running the script on MareNostrum
+
+if(mare) chunk <- as.integer(args[1]) # number of the chunk to run in this script
+
+##########################################################################################
+#                           Load functions and libraries                                 #
+##########################################################################################
+
+# load libraries and functions:
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+library(abind)
+#library(ff)
+#library(ffbase)
+# Load function split.array:
+if(!mare) {source('/scratch/Earth/ncortesi/RESILIENCE/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+##########################################################################################
+#                                   User's settings                                      #
+##########################################################################################
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/projects/bsc32/bsc32842/results")
+
+cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name <- 'sfcWind' # forecast variable: the name used inside the netCDF files and as a suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (usually January)
+day <- 2 # starting forecast day
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRpss, FairCrpss and/or RelDiagr
+
+veri.month <- 1 #1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for the FairRpss and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level of the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+col <- if(!mare) {
+  as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1])
+} else {
+  as.character(read.csv("/gpfs/projects/bsc32/bsc32842/scripts/rgbhex.csv",header=F)[,1])
+}
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+# Monthly startdates for the 2014 reforecasts (in the future you can modify it to work for a generic year; see the sketch below):
+startdates.monthly<-list()
+startdates.monthly[[1]]<-1:5
+startdates.monthly[[2]]<-6:9
+startdates.monthly[[3]]<-10:13
+startdates.monthly[[4]]<-14:17
+startdates.monthly[[5]]<-18:22
+startdates.monthly[[6]]<-23:26
+startdates.monthly[[7]]<-27:31
+startdates.monthly[[8]]<-32:35
+startdates.monthly[[9]]<-36:39
+startdates.monthly[[10]]<-40:44
+startdates.monthly[[11]]<-45:48
+startdates.monthly[[12]]<-49:52
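+
+# Sketch of the generic-year alternative mentioned above, assuming sdates.seq holds 'YYYYMMDD'
+# strings (as the substr() calls elsewhere in this script imply); it groups the weekly
+# startdates by their month and reproduces the hard-coded list for 2014:
+#startdates.monthly <- split(seq_along(sdates.seq), as.integer(substr(sdates.seq, 5, 6)))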
+
+## extract geographic coordinates; do it only ONCE for each prediction system, save the lat/lon in coordinates.RData, and then comment these rows and use the load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                  obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                  nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+load(paste0(workdir,'/coordinates.RData'))
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+if(mare) n.lat <- 1
+
+#########################################################################################
+#       Calculate and save up to all the chosen Skill Scores for a selected period      #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies by running once the preformatting part at the end of this script.
+#
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  # If mare == FALSE, split the array in our workstation considering the optimal size of a chunk; if mare == TRUE, split the array on MN by lat with chunk size = 1 value
+  if(mare == FALSE) chunk <- split.array(dimensions = hind.dim, along = 5)
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Chunk n. ')
+
+  # if we run the script from the terminal with an argument, it only computes the chunk specified by that argument and saves the results for that chunk.
+  # If not, it loops over all chunks in a serial way and saves the results (already united, so the chunks disappear) at the end. A toy sketch of the chunk bookkeeping follows.
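+
+  # Toy sketch (not executed) of the chunk bookkeeping used below. The real split.array()
+  # lives in Rfunctions.R and is not shown here; this toy only mimics the fields this script
+  # relies on (n.chunk, int, n.int), splitting the longitude dimension into equal pieces:
+  if(FALSE){
+    toy.split <- function(n.lon, n.chunk){
+      breaks <- floor(seq(0, n.lon, length.out = n.chunk + 1))
+      int <- lapply(1:n.chunk, function(c) (breaks[c]+1):breaks[c+1]) # lon indices of chunk c
+      list(n.chunk = n.chunk, int = int, n.int = lapply(int, length))
+    }
+    chunk.demo <- toy.split(n.lon = 10, n.chunk = 3)
+    chunk.demo$int[[2]] # 4 5 6
+  }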
+  if(mare == FALSE) {my.chunks <- 1:chunk$n.chunk} else {my.chunks <- chunk}
+
+  for(c in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation:
+    #s=1 # for the debug
+    cat(paste0(c,'/', max(my.chunks),' '))
+
+    #if(s == chunk$n.chunk){ add.last <- chunk$chunk.size.last } else { add.last <- 0 } # because the last chunk is longer than the others, if chunk.size.last>0
+
+    anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, chunk$n.int[[c]]))
+    #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+
+# MOVE EVERYTHING TO MARENOSTRUM!!!!
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+# load 1 file once to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1)
+lat <- coord$lat
+lon <- coord$lon
+save(lat, lon, file=paste0(workdir,'/coordinates_ECMWF_monthly.RData')) # (file name assumed; kept distinct from the coordinates.RData used above)
+load(paste0(workdir,'/coordinates_ECMWF_monthly.RData'))
+
+
+system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = lat[5]))
+
+#ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+#system.time(a<-Load(var = 'z500', exp = NULL, obs = list(ERAint), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 360, output = 'lonlat', nprocs=1, lonmin = domain$lon[5], lonmax = domain$lon[5]))
+
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate == my.startdates) # count of the number of startdates already loaded +1
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,,chunk$int[[c]]]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,,chunk$int[[c]]]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data
+        anom.hindcast.mean <- drop(anom.hindcast.mean)
+        anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,,chunk$int[[c]]]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+        # the probabilities for the climatological ensemble can be set without using convert2prob; they are the same for all points and leadtimes (33% for each tercile):
+        ens <- array(33,c(n.startdates*n.yrs.hind,3))
+
+        # the observed probabilities vary from point to point and for each leadtime:
+        obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+
+        # reshape obs to be able to apply EnsRps(ens, obs) below:
+        obs2 <- InsertDim(obs,1,3)
+        obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,]
+
+        my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+        my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+        my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+
+        my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+        my.FairRpss[,,chunk$int[[c]]] <- my.FairRpss.chunk
+        rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+          cat(paste0("resampling n. ",b,"/",n.boot," "))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+          my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+          ens <- array(33,c(n.startdates*n.yrs.hind,3))
+          obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+          obs2 <- InsertDim(obs,1,3)
+          obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,]
+          my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+          my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+          my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+          my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+          my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk
+          rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+          gc()
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+      }
+    }
+
+    if(any(my.score.name=="FairCrpss")){
+      if(!boot){
+        my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+        my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+        my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+        my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+        my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+        my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk
+        rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+          print(paste0("resampling n. ",b,"/",n.boot))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+          my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+          anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply:
+          my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+          my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+          my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+          my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+          my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk
+          rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+          gc()
+
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+      }
+
+    }
+
+    if(any(my.score.name=="EnsCorr")){
+      # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+      my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]]))
+
+      for(i in 1:n.leadtimes){
+        for(j in 1:n.lat){
+          for(k in 1:chunk$n.int[[c]]){
+            my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")
+            # option 'use' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+
+            my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k])$p.value # (cor.test() has no 'use' argument: it already keeps complete cases only)
+
+          }
+        }
+      }
+
+      my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk
+      my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk
+
+      rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk)
+    }
+
+    rm(anom.rean.chunk)
+    gc()
+    #mem()
+
+    if(mare == TRUE) {
+      if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c,'.RData'))
+      if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c,'.RData'))
+    }
+
+
+  } # next c (chunk)
+
+  # the Reliability Diagram cannot be computed splitting the data in chunks, so it is computed only if the script is run from the R interface or with no arguments:
+  if(mare == FALSE) {
+    if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but it can be split by leadtime instead:
+      my.RelDiagr<-list()
+
+      for(lead in 1:n.leadtimes){
+        cat(paste0('leadtime n.: ',lead,'/',n.leadtimes))
+        anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon))
+        anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon))
+        cat(' startdate: ')
+        i=0
+
+        for(startdate in my.startdates){
+          i=i+1
+          cat(paste0(i,'/',length(my.startdates),' '))
+
+          pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+          my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+          # Load reanalysis data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+          anom.rean <- drop(anom.rean)
+          anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+          # Load hindcast data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+          anom.hindcast <- drop(anom.hindcast)
+          anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+          rm(anom.rean,anom.hindcast)
+        }
+
+
+        anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+        gc()
+
+        cat('Computing the Reliability Diagram. Please wait... ')
+        ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob = my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+        obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob = my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+        ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+        obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes 10s
+
+        if(is.null(int.lat)) int.lat <- 1:n.lat # if there is no region selected, select the whole world
+        if(is.null(int.lon)) int.lon <- 1:n.lon
+
+        int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+        my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+        int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+        my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+        my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        cat('done\n')
+        #print(my.RelDiagr) # for debugging
+
+        rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+        gc()
+
+      } # next lead
+
+    } # close if on RelDiagr
+
+    # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved):
+    if(!boot){
+      if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData')) # my.PValue is stored together with my.EnsCorr (the map section below expects it in this file)
+      if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+    } else { # bootstrapping output:
+      if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+    }
+
+    # If it has finished computing the last chunk, it can load all the results in one file, deleting the intermediate output files:
+    # (left empty as a placeholder: job.chunk and tot.chunks are not defined in this version)
+    if(job.chunk == tot.chunks){
+
+    }
+
+
+  } # close if on mare
+
+} # next m (month)
+
+
+
+
+
+
+
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(mare == FALSE){
+
+###########################################################################################
+#              Visualize and save the score maps for one score and period                 #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
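+
+# Small demo (not executed) of the index gymnastics used in the maps below for PlotEquiMap():
+# latitudes are flipped to run from +90 to -90 and longitudes are rotated to run from 0 to 360.
+# The 4x6 toy grid and toy.n.lonr value are made up so that the reordering is easy to eyeball:
+if(FALSE){
+  toy <- matrix(1:24, 4, 6) # [lat, lon] with lat increasing and the 0-degree meridian in the middle
+  toy.n.lat <- 4; toy.n.lon <- 6; toy.n.lonr <- 3
+  toy[toy.n.lat:1, c((toy.n.lonr+1):toy.n.lon, 1:toy.n.lonr)] # same reordering applied to my.score below
+}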
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(-10,seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lo,la,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point if the correlation is significant:
+
+          # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev <- my.PValue
+          my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead, y, x] < my.pvalue) {
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                        Composite map of the four skill scores                               #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+library(jpeg) # needed below for readJPEG()
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani's Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate    #
+###############################################################################################
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
+                    obs = NULL, sdates = paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values as many times as needed to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+                obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime)
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_for_ACC_startdate',startdate,'.RData'))
+
+  # save to the same /Data_<var.name> subdir the skill-score part loads from:
+  save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data_',var.name,'/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
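+
+# Equivalent, more compact way (not executed) to remove a climatology without the InsertDim()
+# replication used above: sweep() recycles the climatology along the averaged-out dimension.
+# The toy dimensions [member, year, leadtime] below are made up:
+if(FALSE){
+  toy <- array(rnorm(4*20*4), c(4,20,4))
+  toy.clim <- apply(toy, c(1,3), mean)          # average over the 20 years
+  toy.anom <- sweep(toy, c(1,3), toy.clim, "-") # subtract it from every year
+  range(apply(toy.anom, c(1,3), mean))          # ~0: anomalies average to zero
+}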
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values as many times as needed to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime)
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data_',var.name,'/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+
+
+
+
+
+
+##################################################################################
+#                                 Toy Model                                      #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+
+my.prob=c(1/3,2/3) # quantiles used for the FairRpss and the RelDiagr (usually they are the terciles)
+
+N=80 # number of years
+M=14 # number of members
+
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
+
+obs<-convert2prob(anom.rean,prob=my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement from the hindcast
+ens.clim<-convert2prob(anom.hind.clim,prob=my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one column more
+ens.clim.rean<-convert2prob(anom.rean.clim,prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
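+
+# Tiny check (not executed) of the type=4 remark above: per that comment, type=4 is the only
+# count2prob() estimator for which the K category probabilities of each year sum to exactly 1
+# (the other types are based on different plotting positions):
+if(FALSE){
+  toy.counts <- convert2prob(rnorm(30), prob=c(1/3,2/3)) # [year, category] membership counts
+  rowSums(count2prob(toy.counts, type=4))                # all exactly 1
+  rowSums(count2prob(toy.counts))                        # with the default type they are not
+}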
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without providing the clim. forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm, not only to swap dimensions but also to merge two of them into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
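+
+# Verification (not executed) of the aperm() trick praised above: after aperm() moves the two
+# dimensions to be merged into adjacent positions, array() fuses them while preserving the
+# element order. The toy 2x3x4 array below is made up:
+if(FALSE){
+  x <- array(1:24, c(2,3,4))
+  y <- array(aperm(x, c(2,1,3)), c(3, 2*4))        # dims 1 and 3 of x end up merged column-wise
+  identical(as.vector(y[,1]), as.vector(x[1,,1]))  # TRUE: order is preserved
+}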
+
+# when you have, for each leadtime, all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+                   obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+                   leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") # (the exp. series must have been computed beforehand)
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the reanalysis climatology loading only 1 file each 4 (it's quick, but it needs to have all the files beforehand):
+
+clim.ERAI <- array(NA, c(52, 4, n.lat, n.lon)) # initialize the climatology array (dimensions assumed: startdate, leadtime, lat, lon)
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate   leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week)      1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] #    1        a b c d  <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] #    2        b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] #    3        c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] #    4        d e f g
+  }                                                       #    5        e f g h
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
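+
+# Sanity check (not executed) that the forward/backward fills above really cover every
+# startdate/leadtime cell of clim.ERAI; the boolean matrix below mirrors the assignments:
+if(FALSE){
+  filled <- matrix(FALSE, 52, 4)
+  for(s in c(seq(1,52,by=4),50,51,52)){
+    filled[s,] <- TRUE                                                       # direct load
+    if(s <= 49){ filled[s+1,1:3] <- TRUE; filled[s+2,1:2] <- TRUE; filled[s+3,1] <- TRUE }
+    if(s > 1 && s <= 49){ filled[s-1,2:4] <- TRUE; filled[s-2,3:4] <- TRUE; filled[s-3,4] <- TRUE }
+  }
+  all(filled) # TRUE: every startdate/leadtime combination gets a value
+}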
+ message(i,"\r",appendLF=FALSE) + flush.console() +} + + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + cat(i,"\r") + + +} + + + +####################################################################################### +# Load large datasets with ff # +####################################################################################### + +library(ff) +my.scratch<-"/scratch/Earth/ncortesi/myBigData" +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind,file=my.scratch) +close(anom.hind);rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind);rm(anom.hind) + + + +BigDataPath <- "/scratch/Earth/ncortesi/myBigData" +source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R') + +anom.hind.dim<-c(5,3,1,256,512) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +save.big(array=anom.hind, path=BigDataPath) + +veriApplyBig <- function(, obsmy.scratch){ + ffload(file=my.scratch) + open(anom.hind) + my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) + close(anom.hind) +} + + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) + +my.scratch<-"/scratch/Earth/ncortesi/myBigData" + +anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) +ffsave(anom.hind, file=my.scratch) +close(anom.hind); rm(anom.hind) + +ffload(file=my.scratch, list=c(anom.hind)) +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +close(anom.hind); rm(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind") +str(anom.hind) +dim(anom.hind) + +anom.hind.dim<-c(51,30,1,256,51) +anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory +anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData") +str(anom.hind) +dim(anom.hind) + +ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind") +#ffinfo(file="/scratch/Earth/ncortesi/anomhind") +close(anom.hind) +#delete(anom.hind) +#rm(anom.hind) + +ffload(file="/scratch/Earth/ncortesi/anomhind") + +anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory + +open(anom.hind) +my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3)) +fcst<-anom.hind +obs<-anom.rean + +close(anom.hind) +rm(anom.hind) + +#ffdrop(file="/scratch/Earth/ncortesi/anomhind") + +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE) +a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE) + + +pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + +assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)) +a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! 
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross)) # this is the row that fails with get()
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the ff package (storing the arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for debug
+  my.startdates<-startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # the reanalysis data is not converted to ff because it is a small array
+
+  mod <- ifelse(boot, 1, 0) # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    #anom.rean<-drop(anom.rean)
+    #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+    my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+    data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+               obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+               output = 'lonlat', configfile = file_path, method='distance-weighted')
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+    #anom.hindcast<-drop(anom.hindcast)
+
+    anom.hindcast <- drop(data.hindcast$mod) # note: still raw values here; subtract the hindcast climatology (as in the preformatting section) to obtain true anomalies
+    anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+    rm(anom.hindcast)
+  }
+
+  my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+  save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                       Alternative way to speed up Load()                              #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+  var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a resolution different from that of the MSLP dataset
+  var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to test for being WTs-driven
+  var <- Load(var = var.name, exp = NULL, obs = list(var.rean), sdates = paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+  # Forecast system list:
+  S4 <- list(path = ...)
+  CFSv2 <- list(path = ...)
+
+  # Reanalysis list:
+  ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+  JRA55 <- list(path = ...)
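+  # For instance (illustrative only; the entry is the ERAint list defined above and the date is arbitrary),
+  # such a fully specified list can be passed straight to Load() without any configuration file:
+  #test <- Load(var = 'sfcWind', exp = NULL, obs = list(ERAint), sdates = '19900101',
+  #             storefreq = 'daily', leadtimemax = 1, output = 'lonlat')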
+
+
+
+
+  # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+  exp.source <- NULL
+  obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+  # Select one variable and its frequency:
+  var.name <- 'sfcWind'
+  store.freq <- 'daily'
+
+  # Extract only the directory path of the forecast and reanalysis:
+  obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+  obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+  obs.source4 <- strsplit(obs.source3,'/')
+  obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+  obs.source6 <- list.files(path = obs.source5)
+
+  system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+  system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+  y <- 1990
+  var <- Load(var = var.name, exp = exp.source, obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+} # close if on mare
+
+
diff --git a/old/SkillScores_v7.R b/old/SkillScores_v7.R
new file mode 100644
index 0000000000000000000000000000000000000000..9f1b9f0b328ec1739ed3432ba24538e0acacabb0
--- /dev/null
+++ b/old/SkillScores_v7.R
@@ -0,0 +1,1299 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v7.R <chunk>
+#
+# i.e: to run only chunk number 3, write:
+#
+# Rscript SkillScores_v7.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
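+#
+# (e.g., assuming enough CPUs and memory are available, several chunks can also be launched in parallel
+# from bash, one process per chunk:
+#
+# for c in $(seq 1 8); do Rscript SkillScores_v7.R $c & done
+# )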
+
+##########################################################################################
+#                           Load functions and libraries                                 #
+##########################################################################################
+
+# load libraries and functions:
+library(SpecsVerification) # for skill scores
+library(s2dverification)   # for the Load() function
+library(easyVerification)  # for the veriApply() function
+library(abind)             # for merging arrays
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')
+
+##########################################################################################
+#                               MareNostrum settings                                     #
+##########################################################################################
+
+args <- commandArgs(TRUE) # put into the variable args the list of all the optional arguments with which the script was run from the terminal
+
+if(length(args) == 0) print("Running the script in a sequential way")
+if(length(args) > 1) stop("At most one argument is allowed")
+
+mare <- length(args) == 1 # set the variable 'mare' to TRUE if we are running the script on MareNostrum, FALSE otherwise
+
+if(mare) chunk <- as.integer(args[1]) # number of the chunk to run in this script (if mare == TRUE)
+
+##########################################################################################
+#                                  User's settings                                       #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/projects/bsc32/bsc32842/results")
+
+rean.name <- "ERA-Interim"
+
+cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for the monthly predictions (used in map titles)
+var.name <- 'sfcWind' # forecast variable; it is the name used inside the netCDF files and as a suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (1: January, 2: February, etc.)
+day <- 2 # starting forecast day
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # when computing the EnsCorr, the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for the FairRPSS and the RelDiagr (usually the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level of the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # when computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecast categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+## extract the geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows, using the load() below instead:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#           obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#           nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format the geographic coordinates to use with PlotEquiMap:
+#load(paste0(workdir,'/coordinates.RData'))
+#n.lon <- length(lons)
+#n.lat <- length(lats)
+#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+# load only 1 year of var data from the reanalysis to get lat and lon:
+data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+          obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)),
+          nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear')
+
+lons <- data.rean$lon
+lats <- data.rean$lat
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#if(mare) n.lat <- 1
+
+my.grid<-paste0('r',n.lon,'x',n.lat)
+
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate    #
+###############################################################################################
+
+# Run it only once at the beginning to generate the .RData files:
+# Note that the forecast data is interpolated to the same resolution as ERA-Interim
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  # load the weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean
+  data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+            obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+            nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear')
+
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # mean of the 20 years (for each point and leadtime):
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values n.yrs.hind times to be able to calculate the anomalies #[1,1,20,4,320,640]
+  anom.rean <- data.rean$mod - clim.rean
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  save(anom.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,clim.rean)
+  gc()
+
+}
+
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing forecast anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  # Load the weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate it to the ERA-Interim resolution (512 x 256):
+  data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
+            obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6),
substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='bilinear') + + #data.hindcast <- Load(var = var.name, exp = 'list(list(path=fields))', obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, storefreq='daily', output = 'lonlat', grid=my.grid, method='bilinear' ) + + #Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a selected period # +######################################################################################### +# +# Remember that before computing the skill scores, you have to create and save the anomalies running once the preformatting part above +# + +for(month in veri.month){ + #month=1 # for the debug + + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates <- length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + + my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes, n.lat,n.lon)) + if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + + cat('Chunk n. 
')
+
+  # if we run the script from the terminal with an argument, it only computes the chunk specified in that argument and saves the results for that chunk;
+  # if not, it loops over all the chunks serially and saves the results at the end.
+  if(!mare) {my.chunks <- 1:n.lat} else {my.chunks <- chunk}
+
+  for(cnk in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation; each chunk is one latitude row:
+    #s=1 # for the debug
+    cat(paste0(cnk,'/', n.lat,' '))
+    anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, 1, n.lon))
+    anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, 1, n.lon))
+    #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+    # not working on MareNostrum yet:
+    # load 1 file ONLY ONCE to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+    #coord <- Load(var = var.name, exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1)
+    # lat <- coord$lat
+    # lon <- coord$lon
+    # save()
+    # load()
+    #system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = lat[5]))
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate == my.startdates) # count of the number of startdates already loaded +1
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,cnk,]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,cnk,]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load the ensemble mean hindcast data
+        anom.hindcast.mean <- drop(anom.hindcast.mean)
+        anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,cnk,]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+        #anom.hindcast.chunk.double <- abind(list(anom.hindcast.chunk,anom.hindcast.chunk),along=4)
+        #anom.rean.chunk.double <- abind(list(anom.rean.chunk,anom.rean.chunk),along=3)
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+        # the probabilities of the climatological ensemble can be set without using convert2prob, as they are the same for all points and leadtimes (33% for each tercile):
+        ens <- array(33,c(n.startdates*n.yrs.hind,3))
+
+        # the observed probabilities vary from point to point and for each leadtime:
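+        # (convert2prob() returns, for each year, the 0/1 counts marking which tercile the observed value falls in)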
+        obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+
+        # reshape obs to be able to apply EnsRps(ens, obs) below:
+        obs2 <- InsertDim(obs,1,3)
+        obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to lose the dimension(s) with only 1 element
+
+        my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+        my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum)
+        my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum)
+
+        my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+        my.FairRpss[,cnk,] <- my.FairRpss.chunk
+
+        rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+          cat(paste0("resampling n. ",b,"/",n.boot))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+          my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+          ens <- array(33,c(n.startdates*n.yrs.hind,3))
+          obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+          obs2 <- InsertDim(obs,1,3)
+          obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F]
+          my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+          my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+          my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+          my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+          my.FairRpssBoot[b,,cnk,] <- my.FairRpss.chunk
+          rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+          gc()
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairRpssConf[,,cnk,] <- apply(my.FairRpssBoot[,,cnk,], c(2,3), function(x) quantile(x, probs=conf.level, type=8))
+      }
+    }
+
+    if(any(my.score.name=="FairCrpss")){
+      if(!boot){
+        my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+        my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+        my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+        my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+        my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+        my.FairCrpss[,cnk,]<-my.FairCrpss.chunk
+        rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+          print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + if(mare == TRUE) { + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + } + + + } # next c (chunk) + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + if(mare == FALSE) { + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + 
+          my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+          # Load the reanalysis data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+          anom.rean <- drop(anom.rean)
+          anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+          # Load the hindcast data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+          anom.hindcast <- drop(anom.hindcast)
+          anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+          rm(anom.rean,anom.hindcast)
+        }
+
+
+        anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+        gc()
+
+        cat('Computing the Reliability Diagram. Please wait... ')
+        ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob=my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+        obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob=my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+        ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+        obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes 10s
+
+        if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, select the whole world
+        if(is.null(int.lon)) int.lon <- 1:n.lon
+
+        int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+        my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+        int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+        my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+        my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        cat('done\n')
+        #print(my.RelDiagr) # for debugging
+
+        rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+        gc()
+
+      } # next lead
+
+    } # close if on RelDiagr
+
+    # we can save the results for all the chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved):
+    if(!boot){
+      if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+      if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+    } else { # bootstrapping output:
+      if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+    }
+
+    # When the last chunk has finished, all the chunk results could be merged into one file, deleting the intermediate output files (still to be implemented):
+    #if(job.chunk == tot.chunks){
+    #}
+
+  } # close if on mare
+
+} # next m (month)
+
+
+
+
+
+
+
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(mare == FALSE){
+
+###########################################################################################
+#            Visualize and save the score maps for one score and period                   #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all the breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,' 10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees, and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons, lats, sizetit=0.8, brks=my.brk, cols=my.col, axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small point where the correlation is statistically significant:
+
+          # rearrange lat/lon in my.PValue (a second variable stored in the EnsCorr file) to start from +90 degrees (lat) and from 0 degrees (lon):
+          my.PValue.rev <- my.PValue
+          my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) {
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                       Composite map of the four skill scores                                #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+library(jpeg) # for readJPEG()
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# TODO: finish this section!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
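+# A toy sketch of this anomaly definition (invented array sizes, not the real Load() output;
+# dimensions are [member, year, leadtime, lat, lon] for the hindcast and [member, 1, leadtime, lat, lon] for the forecast):
+#hind.toy <- array(rnorm(4*20*4*2*3), c(4,20,4,2,3))
+#fcst.toy <- array(rnorm(4*1*4*2*3), c(4,1,4,2,3))
+#clim.toy <- apply(hind.toy, c(3,4,5), mean)                 # hindcast climatology for each leadtime and grid point
+#anom.fcst.toy <- sweep(fcst.toy, c(3,4,5), clim.toy, "-")   # forecast anomalies w.r.t. the hindcast climatology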
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
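+# (convert2prob() bins each value into length(prob)+1 categories, using the sample quantiles of the input as thresholds)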
+obs<-convert2prob(anom.rean,prob=my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample of the hindcast, without replacement
+ens.clim<-convert2prob(anom.hind.clim,prob=my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on the observations instead (as applied by veriApply via convert2prob when the option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is almost equal to the ClimEns() above, it only has one column more
+ens.clim.rean<-convert2prob(anom.rean.clim,prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the probabilities of each year don't sum to 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the probabilities of each year don't sum to 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is equal to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; equal to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using its own climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it is different both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; equal to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # letting veriApply build its own climatological reference
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its own climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it is different both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' Rps: ',Rps,' Rps.clim: ',Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' Rps: ',Rps,' Rps.clim: ',Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                Other analysis                                  #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # a very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into a single one!
+
+# when you have, for each leadtime, all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of the point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+               obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+               leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+# (the my.points.exp.* objects below are computed in the same way, but from the hindcast data)
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the reanalysis climatology loading only 1 file every 4 (it's quick, but it needs all the files to be available beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate every 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+              obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+              nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate  leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] #  (week)     1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] #    1        a b c d   <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] #    2        b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] #    3        c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] #    4        d e f g
+  }                                                       #    5        e f g h
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                               Raster Animations                                     #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                   ProgressBar                                       #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
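+
+# A third option, a sketch using base R's txtProgressBar, which draws and updates the bar
+# itself (no flushing tricks needed):
+pb <- txtProgressBar(min = 0, max = 10, style = 3)
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  setTxtProgressBar(pb, i)
+}
+close(pb)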
+
+
+#######################################################################################
+#                          Load large datasets with ff                                #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list=c("anom.hind")) # note: the list argument takes the object *names*, as in save()
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # pretend it doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+veriApplyBig.from.disk <- function(my.scratch){ # small wrapper: load the ff hindcast from disk, score it, close it
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list=c("anom.hind"))
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # pretend it doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # this one fits in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
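+
+# Minimal clean round trip (a sketch, assuming ffsave()/ffload() mirror save()/load()
+# semantics and that the demo path below is writable):
+#x <- as.ff(rnorm(1e6), vmode="double", filename="/scratch/Earth/ncortesi/ff_demo.ff")
+#ffsave(x, file="/scratch/Earth/ncortesi/ff_demo")   # writes ff_demo.RData + ff_demo.ffData
+#close(x); rm(x)
+#ffload(file="/scratch/Earth/ncortesi/ff_demo", list=c("x"))
+#open(x); mean(x[1:100]); close(x)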
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the ff package (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # the reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod <- 1 else mod <- 0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
+   pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+   my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+   #anom.rean<-drop(anom.rean)
+   #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+   my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+   data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+             obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+             output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+   #anom.hindcast<-drop(anom.hindcast)
+   anom.hindcast <- drop(data.hindcast$mod) # still raw values: the hindcast climatology has yet to be subtracted, as in the preformatting section
+
+   anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+   rm(anom.hindcast)
+ }
+
+ my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+ save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                      Alternative way to speed up Load()                               #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+ var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to check for being WTs-driven
+ var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+ # Forecast system list:
+ S4 <- list(path = ...)
+ CFSv2 <- list(path = ...)
+
+ # Reanalysis list:
+ ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ JRA55 <- list(path = ...)
+
+
+
+
+ # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+ exp.source <- NULL
+ obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+ # Select one variable and its frequency:
+ var.name <- 'sfcWind'
+ store.freq <- 'daily'
+
+ # Extract only the directory path of the forecast and reanalysis:
+ obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+ obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+ obs.source4 <- strsplit(obs.source3,'/')
+ obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/") # note: dirname(obs.source3) returns the same directory in one step
+
+ obs.source6 <- list.files(path = obs.source5)
+
+ system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+ system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+ y <- 1990
+ var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+} # close if on mare
+
+
diff --git a/old/SkillScores_v7.R~ b/old/SkillScores_v7.R~
new file mode 100644
index 0000000000000000000000000000000000000000..6861626568384da87e42c06a3c65f9dd852c4ae2
--- /dev/null
+++ b/old/SkillScores_v7.R~
@@ -0,0 +1,1299 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v7.R
+#
+# e.g., to run only chunk number 3, write:
+#
+# Rscript SkillScores_v7.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
+
+##########################################################################################
+#                           Load functions and libraries                                 #
+##########################################################################################
+
+# load libraries and functions:
+library(SpecsVerification) # for skill scores
+library(s2dverification) # for the Load() function
+library(easyVerification) # for the veriApply() function
+library(abind) # for merging arrays
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')
+
+##########################################################################################
+#                               MareNostrum settings                                     #
+##########################################################################################
+
+args <- commandArgs(TRUE) # put into the variable args the list of all the optional arguments with which the script was run from the terminal
+
+if(length(args) == 0) print("Running the script in a sequential way")
+if(length(args) > 1) stop("Only one argument is allowed")
+
+mare <- ifelse(length(args) == 1, TRUE, FALSE) # set variable 'mare' to TRUE if we are running the script on MareNostrum, FALSE otherwise
+
+if(mare) chunk <- as.integer(args[1]) # number of the chunk to run in this script (if mare == TRUE)
+
+##########################################################################################
+#                                  User's settings                                       #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/projects/bsc32/bsc32842/results")
+
+rean.name <- "ERA-Interim"
+
+cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+forecast.year <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (1: january, 2: february, etc.)
+day <- 2 # starting forecast day
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for the FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level of the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecast categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows, using load() below instead:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#          nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+#load(paste0(workdir,'/coordinates.RData'))
+#n.lon <- length(lons)
+#n.lat <- length(lats)
+#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+# load only 1 year of var data from the reanalysis to get lat and lon (the first startdate is enough):
+data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+          obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)),
+          nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear')
+
+lons <- data.rean$lon
+lats <- data.rean$lat
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+
+my.grid<-paste0('r',n.lon,'x',n.lat)
+
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate    #
+###############################################################################################
+
+# Run it only once at the beginning to generate the .RData files:
+# Note that forecast data is interpolated to the same resolution as ERA-Interim
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean
+  data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+          nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear')
+
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime)
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values n.yrs.hind times to be able to calculate the anomalies #[1,1,20,4,320,640]
+  anom.rean <- data.rean$mod - clim.rean
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  save(anom.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,clim.rean)
+  gc()
+
+}
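+
+# A toy illustration of InsertDim (s2dverification), which does the heavy lifting above:
+# it inserts a new dimension of length 'lendim' at position 'posdim', recycling the values,
+# so that the [leadtime,lat,lon] climatology becomes conformable with the loaded data:
+#a <- array(1:6, c(2,3))
+#b <- InsertDim(a, 1, 4) # dim(b) = c(4,2,3), with 'a' replicated 4 times along the new dimension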
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing forecast anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate it to the ERA-Interim resolution (512 x 256)
+  data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
+          obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)),
+          nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='bilinear')
+
+  #data.hindcast <- Load(var = var.name, exp = 'list(list(path=fields))', obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, storefreq='daily', output = 'lonlat', grid=my.grid, method='bilinear' )
+
+  #Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values to be able to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+  save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_mean_startdate',startdate,'.RData'))
+
+  rm(anom.hindcast,anom.hindcast.mean)
+  gc()
+
+}
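+
+# quick sanity check (a sketch, assuming the .RData files above exist): by construction the
+# anomalies average to ~0 over members and hindcast years, at every leadtime and grid point:
+#load(paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_startdate1.RData'))
+#max(abs(apply(anom.hindcast, c(1,4,5,6), mean))) # ~0 up to floating-point error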
+
+#########################################################################################
+#      Calculate and save up to all the chosen Skill Scores for a selected period       #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies, running the preformatting part above once
+#
+
+for(month in veri.month){
+  #month=1 # for the debug
+
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Chunk n. ')
+
+  # if we run the script from the terminal with an argument, it only computes the chunk we specify in that argument and saves the results for that chunk.
+  # If not, it loops over all chunks in a serial way and saves the results at the end.
+  if(!mare) {my.chunks <- 1:n.lat} else {my.chunks <- chunk}
+
+  for(c in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation (one latitude band per chunk):
+    #c=1 # for the debug
+    cat(paste0(c,'/', n.lat,' '))
+    anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, 1, n.lon))
+    anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, 1, n.lon))
+    #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+    # not working on MareNostrum yet:
+    # load 1 file ONLY ONCE to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+    #coord <- Load(var = var.name, exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1)
+    # lat <- coord$lat
+    # lon <- coord$lon
+    # save()
+    # load()
+    #system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = lat[5]))
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate == my.startdates) # count of the number of startdates already loaded +1
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,c,]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,c,]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data
+        anom.hindcast.mean <- drop(anom.hindcast.mean)
+        anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,c,]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+        #anom.hindcast.chunk.double <- abind(list(anom.hindcast.chunk,anom.hindcast.chunk),along=4)
+        #anom.rean.chunk.double <- abind(list(anom.rean.chunk,anom.rean.chunk),along=3)
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+        # the probabilities for the climatological ensemble can be set without using convert2prob, and they are the same for all points and leadtimes (33% for each tercile):
+        ens <- array(33,c(n.startdates*n.yrs.hind,3))
+
+        # the observed probabilities vary from point to point and for each leadtime:
+        obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+
+        # reshape obs to be able to apply EnsRps(ens, obs) below:
+        obs2 <- InsertDim(obs,1,3)
+        obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used so as not to lose the dimension(s) with only 1 element
+
+        my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+        my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum)
+        my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum)
+
+        my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+        my.FairRpss[,c,] <- my.FairRpss.chunk
+
+        rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+          cat(paste0("resampling n. ",b,"/",n.boot," "))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+          my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+          ens <- array(33,c(n.startdates*n.yrs.hind,3))
+          obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+          obs2 <- InsertDim(obs,1,3)
+          obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F]
+          my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+          my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+          my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+          my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+          my.FairRpssBoot[b,,c,] <- my.FairRpss.chunk
+          rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+          gc()
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairRpssConf[,,c,] <- apply(my.FairRpssBoot[,,c,], c(2,3), function(x) quantile(x, probs=conf.level, type=8))
+      }
+    }
+
+    if(any(my.score.name=="FairCrpss")){
+      if(!boot){
+        my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+        my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+        my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+        my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+        my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+        my.FairCrpss[,c,] <- my.FairCrpss.chunk
+        rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+        gc()
+
+      } else { # bootstrapping:
+
+        for(b in 1:n.boot){
+          print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + if(mare == TRUE) { + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + } + + + } # next c (chunk) + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + if(mare == FALSE) { + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + 
+
+  # the Reliability Diagram cannot be computed splitting the data in chunks, so it is computed only if the script is run from the R interface or with no arguments:
+  if(mare == FALSE) {
+    if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but it can be split by leadtime instead:
+      my.RelDiagr<-list()
+
+      for(lead in 1:n.leadtimes){
+        cat(paste0('leadtime n.: ',lead,'/',n.leadtimes))
+        anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon))
+        anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon))
+        cat(' startdate: ')
+        i=0
+
+        for(startdate in my.startdates){
+          i=i+1
+          cat(paste0(i,'/',length(my.startdates),' '))
+
+          pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+          my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+          # Load reanalysis data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+          anom.rean <- drop(anom.rean)
+          anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+          # Load hindcast data of the next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+          anom.hindcast <- drop(anom.hindcast)
+          anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+          rm(anom.rean,anom.hindcast)
+        }
+
+        anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+        gc()
+
+        cat('Computing the Reliability Diagram. Please wait... ')
+        ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob=my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+        obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob=my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+        ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+        obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes 10s
+
+        if(is.null(int.lat)) int.lat <- 1:n.lat # if there is no region selected, select the whole world
+        if(is.null(int.lon)) int.lon <- 1:n.lon
+
+        int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+        my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+        int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+        my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+        my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        cat('done\n')
+        #print(my.RelDiagr) # for debugging
+
+        rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+        gc()
+
+      } # next lead
+
+    } # close if on RelDiagr
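+
+    # quick check (a sketch) of the type=4 normalisation used above: the per-year category
+    # probabilities sum to 1, which is not guaranteed with the default type:
+    #toy.counts <- convert2prob(array(rnorm(20*4), c(20,4)), prob=my.prob) # 20 "years", 4 members
+    #all(abs(rowSums(count2prob(toy.counts, type=4)) - 1) < 1e-12)         # TRUE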
+
+    # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved):
+    if(!boot){
+      if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+      if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+    } else { # bootstrapping output:
+      if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+    }
+
+    # If it has finished computing the last chunk, it can load all the results in one file, deleting the intermediate output files:
+    if(job.chunk == tot.chunks){
+
+    }
+
+  } # close if on mare
+
+} # next m (month)
+
+
+
+
+
+
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(mare == FALSE){
+
+###########################################################################################
+#            Visualize and save the score maps for one score and period                   #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,' 10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point where the correlation is statistically significant:
+
+          # rearrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev <- my.PValue
+          my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead, y, x] < my.pvalue) {
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                       Composite map of the four skill scores                                #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+library(jpeg) # for readJPEG()
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
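+#
+# In short (a sketch; 'data.forecast' is a hypothetical object loaded with Load() as above):
+#
+#   anom.forecast <- data.forecast$mod - clim.hindcast
+#
+# i.e., reuse clim.hindcast as computed below, replicated with InsertDim to the forecast dimensions.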
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
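+# convert2prob (easyVerification) in a nutshell (a sketch): it turns continuous values into
+# per-tercile counts; for a vector of observations each row is a one-hot tercile indicator:
+#ex <- convert2prob(rnorm(9), prob=c(1/3,2/3)) # 9 x 3 matrix of 0/1
+#rowSums(ex) # all 1: every year falls in exactly one tercile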
+obs<-convert2prob(anom.rean, prob=my.prob)
+#mean(obs[,1])  # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind, prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M))  # extract a random sample without replacement from the hindcast
+ens.clim<-convert2prob(anom.hind.clim, prob=my.prob)
+#mean(ens.clim[,1])  # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean)  # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N)))  # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one column more
+ens.clim.rean<-convert2prob(anom.rean.clim, prob=c(1/3,2/3))  # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens)  # we should add the option type=4; if not, the sum for each year is not 100%!
+y<-count2prob(obs)  # we should add the option type=4; if not, the sum for each year is not 100%!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it has the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it has the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it has the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)  # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss  # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4))  # using its internal climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim))  # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim for N -> +Inf (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss  # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss  # without providing the clim. forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)  # check it once to see if it is the same as FairRpss (yes)
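+# Worked check of the FairRps.clim.stable constant used above: with terciles, the climatological
+# cumulative probabilities are (1/3, 2/3, 1), and RPS = sum_k (Pcum_k - Ocum_k)^2 gives 5/9 when
+# the observation falls in tercile 1 or 3 and 2/9 when it falls in tercile 2; since each tercile
+# is observed 1/3 of the time, the expected climatological RPS is:
+# mean(c(5/9, 2/9, 5/9))  # = 4/9 = 0.4444444
+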
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4)  # using its internal climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim))  # comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim for N -> +Inf (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                Other analysis                                  #
+##################################################################################
+
+my.startdates=1 #c(1:9)  # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4)  # N is the number of startdates in the case of forecasts, or the number of years in the case of hindcasts
+#str(tm)  # tm$fcst: [32,64,20,4]  tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps)  # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6))  # very interesting use of aperm, not only to swap dimensions but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
+
+# when, for each leadtime, you have all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+          obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+          leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the reanalysis climatology loading only 1 file out of every 4 (it's quick, but it needs all files to be available beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52)  # take only 1 startdate out of every 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+for(startdate in ss){  # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+          obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+          nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,]  # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,]  #     startdate  leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,]  #     (week)     1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,]  #     1          a b c d   <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,]  #     2          b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,]  #     3          c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,]  #     4          d e f g
+  }                                                        #     5          e f g h
+
+  if(startdate > 1 && startdate <=49){  # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+  # You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+  # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+  # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+  C_cor<-get("C_cor", asNamespace("stats"))
+  test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+  my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                               Raster Animations                                     #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                  ProgressBar                                        #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+
+}
+
+
+
+#######################################################################################
+#                        Load large datasets with ff                                  #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")  # note that the 'list' argument takes the object *names* as a character vector
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)  # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper around the above steps (renamed so as not to shadow the veriApplyBig() sourced above):
+veriApplyBig.wrapper <- function(my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)  # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])  # doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff")  # get() works well in this case but not in the line below! 
(you must use do.call, see WT_drivers_v2.R) +ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + + + +######################################################################################### +# Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff # +######################################################################################### + +bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays in the disk instead than in the RAM) to remove the memory limit + +for(month in veri.month){ + month=1 # for debug + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name=my.month[month] # name given to the period of the selected startdates, i.e:"January" + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + print(paste0("Computing Skill Scores for ",startdate.name)) + + if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array + + if(boot) mod<-1 else mod=0 # in case of the boostrap, anom.hind.big includes also the reanalysis data as additional last member + + if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff")) + if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon)) + + for(startdate in my.startdates){ + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded +1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + if(!boot) { + anom.rean<-drop(anom.rean) + anom.rean.big[my.time.interv,,,] <- anom.rean + } + + if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + anom.hindcast<-drop(anom.hindcast) + + if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast + if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1) + + rm(anom.hindcast, anom.rean) + } + gc() + } + + if(any(my.score.name=="FairRpss" || my.score.name=="FairCrpss")){ + if(!boot) { + if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + } else { + # bootstrapping: + my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean<-drop(anom.rean) + #anom.rean.big[my.time.interv,,,] <- anom.rean + + my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)) + anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, + output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + #anom.hindcast<-drop(anom.hindcast) + + anom.hind.big[,my.time.interv,,,] <- anom.hindcast + rm(anom.hindcast) + } + + my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + + +######################################################################################### +# Alternative way to speed up Load() # +######################################################################################### + +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset + var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to find if it is WTs-driven + var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + + # Forecast system list: + S4 <- list(path = ...) + CFSv2 <- list(path = ...) + + # Reanalysis list: + ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + JRA55 <- list(path = ...) 
+
+
+
+
+    # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+    exp.source <- NULL
+    obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+    # Select one variable and its frequency:
+    var.name <- 'sfcWind'
+    store.freq <- 'daily'
+
+    # Extract only the directory path of the forecast and reanalysis:
+    obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+    obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+    obs.source4 <- strsplit(obs.source3,'/')
+    obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+    obs.source6 <- list.files(path = obs.source5)
+
+    system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+    system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+    y <- 1990
+    var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+} # close if on mare
+
+
diff --git a/old/SkillScores_v8.R b/old/SkillScores_v8.R
new file mode 100644
index 0000000000000000000000000000000000000000..5fc4e5766ce4c6eea7654e0e635d71166a1d89b5
--- /dev/null
+++ b/old/SkillScores_v8.R
@@ -0,0 +1,1308 @@
+#########################################################################################
+#   Sub-seasonal Skill Scores                                                           #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v8.R
+#
+# i.e: to run only the chunk number 3, write:
+#
+# Rscript SkillScores_v8.R 3
+#
+# if you don't specify any argument, or if you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
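+#
+# As a sketch (assuming enough free processors are available, and adjusting the sequence
+# to the actual number of chunks), several chunks could also be launched in parallel from the bash:
+#
+# for c in $(seq 1 8); do Rscript SkillScores_v8.R $c & done
+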
+ +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(easyVerification) # for veriApply() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) print("Running the script in a sequential way") +if(length(args) > 1) stop("Only one argument is required") + +mare <- ifelse(length(args) == 1 ,TRUE, FALSE) # set variable 'mare' to TRUE if we are running the script in MareNostrum, FALSE otherwise + +if(mare) chunk <- as.integer(args[1]) # number of the chunk to run in this script (if mare == TRUE) + +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/projects/bsc32/bsc32842/results") + +rean.name <- "ERA-Interim" + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +forecast.year <- 2014 # starting year of the weekly sequence of the forecasts +mes <- 1 # starting forecast month (1: january, 2: february, etc.) 
+day <- 2    # starting forecast day
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+yr1.hind <- 1994  #1994   # first hindcast year
+yr2.hind <- 2013  #2013   # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32')  # which leadtimes are available in the weekly dataset
+n.members <- 4  # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss')  #c('EnsCorr','FairRpss','FairCrpss','RelDiagr')  # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1:12  # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05  # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3)  # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence interval of the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160  # latitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320  # longitude positions of the grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+preformatting.rean <- FALSE      # If TRUE, instead of measuring the skill scores, it computes the anomalies and climatologies for the reanalysis.
+                                 # Do it once, the first time you evaluate a new forecast system or use a new reanalysis, then set it back to FALSE to measure the skill
+preformatting.forecasts <- FALSE # as above, but for the forecast anomalies and climatologies
+
+boot <- TRUE  # in case of computing the FairRpss and/or the FairCrpss, you can choose to run a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20  # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R'))   # Color scale used for maps
+n.categ <- 1+length(my.prob)  # number of forecast categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1])  # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day)  # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows to use function load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                 obs = 
NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + +#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +#load(paste0(workdir,'/coordinates.RData')) +#n.lon <- length(lons) +#n.lat <- length(lats) +#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) +#la<-rev(lats) +#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + + # load only 1 year of var data from reanalysis to get lat and lon: +data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear') + +lons <- data.rean$lon +lats <- data.rean$lat +n.lon <- length(lons) +n.lat <- length(lats) +n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) +if(mare) n.lat == 1 + +my.grid<-paste0('r',n.lon,'x',n.lat) + + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate # +############################################################################################### + +if(preformatting.rean == TRUE){ + + # Run it only once at the beginning to generate the .RData files: + # Note that forecast data is interpolated to the same resolution of ERA-Interim + + file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path + #file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + + my.startdates=1:52 # choose a sequence of startdates to save their anomalies + + + for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean + data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear') + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + anom.rean <- data.rean$mod - clim.rean + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + save(anom.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,clim.rean) + gc() + + } +} # close if on preformatting.rean == TRUE + +if(preformatting.forecasts == TRUE){ + + for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing forecast anomalies 
for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate them to the same ERA-Interim resolution (512x 256) + data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='bilinear') + + #data.hindcast <- Load(var = var.name, exp = 'list(list(path=fields))', obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, storefreq='daily', output = 'lonlat', grid=my.grid, method='bilinear' ) + + #Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + # save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + + } + +} # close if on preformatting.forecasts == TRUE + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a selected period # +######################################################################################### +# +# Remember that before computing the skill scores, you have to create and save the anomalies running once the preformatting part above +# + +for(month in veri.month){ + #month=1 # for the debug + + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates <- length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + + my.FairRpss <- my.FairCrpss <- my.EnsCorr 
<- my.PValue <- array(NA,c(n.leadtimes, n.lat,n.lon)) + if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + + cat('Chunk n. ') + + # if we run the script from the terminal with an argument, it only computes the chunk we specify in the second argument and save the results for that chunk. + # If not, it loops over all chunks in a serial way and save the results at the end. + if(!mare) {my.chunks <- 1:n.lat} else {my.chunks <- chunk} + + for(cnk in my.chunks){ # EnsCorr, FairRpss and FairCrpss calculation: + #s=1 # for the debug + cat(paste0(cnk,'/', n.lat,' ')) + anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, cnk, n.lon)) + anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, cnk, n.lon)) + #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + # not working on MareNostrum still: + # load 1 file ONLY ONCE to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed: + #coord <- Load(var = var.name, exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1) + # lat <- coord$lat + # lon <- coord$lon + # save() + # load() + #system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = lat[5])) + + for(startdate in my.startdates){ + pos.startdate<-which(startdate == my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,c,] + + # Load hindcast data: + if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss" || any(my.score.name=="RelDiagr"))){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,c,] + rm(anom.hindcast, anom.rean) + gc() + } + + if(any(my.score.name=="EnsCorr")){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data + anom.hindcast.mean <- drop(anom.hindcast.mean) + anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,c,] + rm(anom.hindcast.mean) + gc() + } + + } + + if(any(my.score.name=="FairRpss")){ + if(!boot){ + #anom.hindcast.chunk.double <- abind(list(anom.hindcast.chunk,anom.hindcast.chunk),along=4) + #anom.rean.chunk.double <- abind(list(anom.rean.chunk,anom.rean.chunk),along=3) + + + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + + 
# the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to loose the dimension(s) with only 1 element + + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpss[,c,] <- my.FairRpss.chunk + + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + } else { # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. ",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + } + + if(any(my.score.name=="FairCrpss")){ + if(!boot){ + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk + 
rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+        gc()
+
+      } else {  # bootstrapping:
+
+        for(b in 1:n.boot){
+          print(paste0("resampling n. ",b,"/",n.boot))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+          my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+          anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1)  # merge exp and obs together to use apply:
+          my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+          my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+          my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+          my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+          my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk
+          rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+          gc()
+
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+      }
+
+    }
+
+    if(any(my.score.name=="EnsCorr")){
+      # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+      my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]]))
+
+      for(i in 1:n.leadtimes){
+        for(j in 1:n.lat){
+          for(k in 1:chunk$n.int[[c]]){
+            my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")
+            # option 'use' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+
+            my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value
+
+          }
+        }
+      }
+
+      my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk
+      my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk
+
+      rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk)
+    }
+
+    rm(anom.rean.chunk)
+    gc()
+    #mem()
+
+    if(mare == TRUE) {
+      if(any(my.score.name=="FairRpss"))  save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c,'.RData'))
+      if(any(my.score.name=="EnsCorr"))   save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c,'.RData'))
+    }
+
+
+  } # next c (chunk)
+
+  # the Reliability diagram cannot be computed by splitting the data in chunks, so it is computed only if the script is run from the R interface or with no arguments:
+  if(mare == FALSE) {
+    if(any(my.score.name=="RelDiagr")){  # in this case the computation cannot be split in groups of grid points, but it can be split by leadtime instead:
+      my.RelDiagr<-list()
+
+      for(lead in 1:n.leadtimes){
+        cat(paste0('leadtime n.: ',lead,'/',n.leadtimes))
+        anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon))
+        anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon))
+        cat(' startdate: ')
+        i=0
+
+        for(startdate in my.startdates){
+          i=i+1
+          cat(paste0(i,'/',length(my.startdates),' '))
+
+          
pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data of next startdate: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,] + + # Lead hindcast data of next startdate: + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,] + + rm(anom.rean,anom.hindcast) + } + + + anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob) + gc() + + cat('Computing the Reliability Diagram. Please wait... ') + ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob<-my.prob) # its dimens. are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon] + obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob<-my.prob) # the first n.members*n.yrs.hind elements belong to the first tercile, and so on. + ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !! + obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) #no parallelization here: it only takes 10s + + if(is.null(int.lat)) int.lat <- 1:n.lat # if there is no region selected, select all the world + if(is.null(int.lon)) int.lon <- 1:n.lon + + int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile + my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile + my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) + + int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile + my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) + + cat('done\n') + #print(my.RelDiagr) # for debugging + + rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob) + gc() + + } # next lead + + } # close if on RelDiagr + + # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, that have already been saved + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData')) + if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData')) + } else { # bootstrapping output: + if(any(my.score.name=="FairRpss")) 
save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+  # If it has finished computing the last chunk, it can merge all the chunk results in one file, deleting the intermediate output files:
+  if(job.chunk == tot.chunks){
+    # TODO: not implemented yet
+  }
+
+
+  } # close if on mare
+
+} # next m (month)
+
+
+
+
+
+
+
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(mare == FALSE){
+
+###########################################################################################
+#  Visualize and save the score maps for one score and period                             #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12  # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2  # for the debug
+  startdate.name.map<-my.month[month]  # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr'  # for the debug
+
+    if(my.score.name.map=="EnsCorr")  { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss  # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.')
+
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){  # add a small grey point where the correlation is significant at the my.pvalue level:
+
+          # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev <- my.PValue
+          my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y, x] < my.pvalue) {
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. 
+
+        plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+        lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+        #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+        lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+        legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+        dev.off()
+      }
+
+    } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                         Composite map of the four skill scores                              #
+###############################################################################################
+
+# run it only when you have all 4 skill scores for each month:
+
+library(jpeg)  # for readJPEG()
+
+my.months=9:12  # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month]  # i.e:"January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani Map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# TODO: finish this section!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
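+
+# A minimal, self-contained sketch of the climatology/anomaly computation described above
+# (toy array sizes, illustrative only; the apply() + InsertDim() pattern mirrors the real code below):
+library(s2dverification)                     # for InsertDim()
+x.toy <- array(rnorm(4*20*4), c(4, 20, 4))   # [member, year, leadtime]
+clim.toy <- apply(x.toy, c(1, 3), mean)      # climatology: average over the 20 years -> [member, leadtime]
+clim.toy <- InsertDim(clim.toy, 2, 20)       # put the year dimension back -> [member, year, leadtime]
+anom.toy <- x.toy - clim.toy                 # anomalies, same shape as x.toy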
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf'  # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52  # choose a sequence of forecast startdates to save their anomalies
+
+for(startdate in my.startdates){  # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+         obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+         nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod)  # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,]  # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1)  # repeat the same values over years and members to calc. anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members  [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek',
+         obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+         nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+  # dim(data.rean$mod)  # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime)
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,]  # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1)  # repeat the same values over time to be able to calculate the anomalies  #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1)  # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+
+
+
+
+
+
+##################################################################################
+#                                   Toy Model                                    #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+
+my.prob=c(1/3,2/3)  # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+
+N=80  # number of years (default; overridden by the loops below)
+M=14  # number of members (default; overridden by the loops below)
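+
+# Quick sanity check of what convert2prob() returns before using it below (a sketch, based on my
+# reading of the SpecsVerification API): for a vector of N observations and tercile thresholds it
+# gives an N x 3 matrix of 0/1 category indicators; for an N x M ensemble, an N x 3 matrix of
+# member counts per category:
+demo.obs <- rnorm(10)
+demo.ens <- array(rnorm(10*4), c(10, 4))
+convert2prob(demo.obs, prob = c(1/3, 2/3))  # one 1 per row (the observed tercile)
+convert2prob(demo.ens, prob = c(1/3, 2/3))  # each row sums to the 4 members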
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
+
+obs<-convert2prob(anom.rean,prob=my.prob)
+#mean(obs[,1])  # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M))  # extract a random sample without replacement of the hindcast
+ens.clim<-convert2prob(anom.hind.clim,prob=my.prob)
+#mean(ens.clim[,1])  # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean)  # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N)))  # what convert2prob uses when ref is not specified; it is ~ equal to the above function ClimEns, it only has one extra column
+ens.clim.rean<-convert2prob(anom.rean.clim,prob=c(1/3,2/3))  # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens)  # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs)  # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4)  # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)  # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss  # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4))  # using their climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim))  # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss  # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss  # without providing the clim. forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)  # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4)  # using their climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim))  # comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9)  # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4)  # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm)  # tm$fcst: [32,64,20,4]  tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps)  # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6))  # very interesting use of aperm not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
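+
+# Runnable version of the commented aperm() trick above (toy data): reorder the dimensions so the
+# ensemble comes first, then fuse the two trailing spatial dimensions into a single one:
+x.ap <- array(rnorm(prod(4:8)), 4:8)                    # dims 4 5 6 7 8
+x.ap2 <- array(aperm(x.ap, c(5,1,4,2,3)), c(8,4,7,5*6)) # reorder to 8 4 7 5 6, then fuse the last two dims into 30
+dim(x.ap2)                                              # 8 4 7 30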
+
+# when you have for each leadtime all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+         obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+         leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+# (the corresponding my.points.exp.* forecast series are assumed to have been built in the same way from the hindcast)
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
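+
+# Toy illustration of the check above (synthetic data): averaging the four daily time steps
+# (00/06/12/18 UTC) should correlate better with the underlying signal than any single step,
+# because part of the noise cancels out in the mean:
+set.seed(1)
+truth <- rnorm(20)                                          # one "true" value per year
+steps <- sapply(1:4, function(s) truth + rnorm(20, sd = 1)) # four noisy sub-daily views [20 x 4]
+cor(steps[,1], truth)                                       # a single time step
+cor(rowMeans(steps), truth)                                 # the four steps averaged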
+##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick, but it needs to have all files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52)  # take only 1 startdate each 4, plus the last 3 startdates that must be computed individually because they are not a complete set of 4
+
+# clim.ERAI is assumed to have been initialized beforehand, e.g. clim.ERAI <- array(NA, c(52, 4, n.lat, n.lon))
+for(startdate in ss){  # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+         obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+         nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,]  # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,]  #  startdate  leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,]  #  (week)     1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,]  #  1          a b c d  <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,]  #  2          b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,]  #  3          c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,]  #  4          d e f g
+  }                                                        #  5          e f g h
+
+  if(startdate > 1 && startdate <=49){  # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+  # You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+  # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+  # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+  C_cor<-get("C_cor", asNamespace("stats"))
+  test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+  my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                                 Raster Animations                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                     ProgressBar                                     #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+
+
+}
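+
+# Base R also provides a ready-made progress bar (utils::txtProgressBar), which avoids the
+# cat()/message() tricks above:
+pb <- txtProgressBar(min = 0, max = 10, style = 3)
+for(i in 1:10) { Sys.sleep(0.2); setTxtProgressBar(pb, i) }
+close(pb)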
+
+
+#######################################################################################
+#                           Load large datasets with ff                               #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")  # 'list' takes the *names* of the objects to restore
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)  # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper (the original signature was garbled; the 'obs' and 'my.scratch' arguments are
+# reconstructed, and the function is renamed so it does not shadow the sourced veriApplyBig):
+run.veriApplyBig <- function(obs, my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=obs, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)  # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])  # doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff")  # get() works well in this case, but not in the line below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
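+
+# Minimal self-contained ff round trip distilled from the scratch code above (a sketch; note that
+# ffsave()/ffload() work with object *names*, hence the character 'list' argument):
+library(ff)
+tmp.ff <- tempfile()
+x.ff <- as.ff(array(rnorm(20), c(4, 5)), vmode = "double")
+ffsave(x.ff, file = tmp.ff)            # writes <tmp.ff>.RData + <tmp.ff>.ffData
+close(x.ff); rm(x.ff)
+ffload(file = tmp.ff, list = "x.ff")   # restores the ff object called "x.ff"
+open(x.ff)
+x.ff[1:2, 1:2]
+close(x.ff)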
+
+
+#########################################################################################
+#   Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff   #
+#########################################################################################
+
+bigdata=FALSE  # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1  # for the debug (do not leave it uncommented: it overrides the loop variable)
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month)  #startdates.monthly[[month]] #c(1:5)  # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month]  # name given to the period of the selected startdates, i.e:"January"
+  n.yrs <- length(my.startdates)*n.yrs.hind  # number of total years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon))  # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0  # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates)  # index of the startdate being loaded
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind)  # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))  # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot)  anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss"))  my.FairRpss  <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                       prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData"))  # save the anomalies to free memory
+
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
+
+#########################################################################################
+#       Calculate and save the FairRpss for a chosen period using ff and Load()         #
+#########################################################################################
+
+  anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+  for(startdate in my.startdates){
+    pos.startdate<-which(startdate==my.startdates)  # index of the startdate being loaded
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind)  # time interval where to load data
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    #anom.rean<-drop(anom.rean)
+    #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+    my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+    anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+           obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+           output = 'lonlat', configfile = file_path, method='distance-weighted' )
+    # NB: this Load() call returns raw hindcast data, not anomalies, and the filling of anom.rean.big above is still commented out (work in progress)
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))  # load hindcast data
+    #anom.hindcast<-drop(anom.hindcast)
+
+    anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+    rm(anom.hindcast)
+  }
+
+  my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+  save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                        Alternative way to speed up Load()                             #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+  var.rean=ERAint  # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+  var.name='sfcWind' #'tas' #'prlr'  # any daily variable we want to check for being WT-driven
+  var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+  # Forecast system list:
+  S4 <- list(path = ...)
+  CFSv2 <- list(path = ...)
+
+  # Reanalysis list:
+  ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+  JRA55 <- list(path = ...)
+
+
+
+
+  # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+  exp.source <- NULL
+  obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+  # Select one variable and its frequency:
+  var.name <- 'sfcWind'
+  store.freq <- 'daily'
+
+  # Extract only the directory path of the forecast and reanalysis:
+  obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+  obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+  obs.source4 <- strsplit(obs.source3,'/')
+  obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+  obs.source6 <- list.files(path = obs.source5)
+
+  system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))  # use the R variables, not the $...$ placeholders
+  system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+  y <- 1990
+  var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+} # close if on mare
+
+
diff --git a/old/SkillScores_v8.R~ b/old/SkillScores_v8.R~
new file mode 100644
index 0000000000000000000000000000000000000000..736840945a15f29f5ce2674ca52823cca5339f76
--- /dev/null
+++ b/old/SkillScores_v8.R~
@@ -0,0 +1,1298 @@
+#########################################################################################
+#                             Sub-seasonal Skill Scores                                 #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v8.R
+#
+# i.e: to run only the chunk number 3, write:
+#
+# Rscript SkillScores_v8.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
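+#
+# Sketch of dispatching several chunk jobs in parallel from R itself (hypothetical helper; the
+# documented way is to launch the "Rscript SkillScores_v8.R <chunk>" jobs from the shell):
+#n.chunks <- 8  # assumed number of chunks
+#for(k in 1:n.chunks) system(paste("Rscript SkillScores_v8.R", k), wait = FALSE)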
+
+##########################################################################################
+#                            Load functions and libraries                                #
+##########################################################################################
+
+# load libraries and functions:
+library(SpecsVerification)  # for skill scores
+library(s2dverification)    # for the Load() function
+library(easyVerification)   # for the veriApply() function
+library(abind)              # for merging arrays
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')
+
+##########################################################################################
+#                               MareNostrum settings                                     #
+##########################################################################################
+
+args <- commandArgs(TRUE)  # store in 'args' the optional arguments the script was run with from the terminal
+
+if(length(args) == 0) print("Running the script in a sequential way")
+if(length(args) > 1) stop("Only one argument is required")
+
+mare <- ifelse(length(args) == 1, TRUE, FALSE)  # set variable 'mare' to TRUE if we are running the script on MareNostrum, FALSE otherwise
+
+if(mare) chunk <- as.integer(args[1])  # number of the chunk to run in this script (if mare == TRUE)
+
+##########################################################################################
+#                                  User's settings                                       #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE", "/gpfs/projects/bsc32/bsc32842/results")
+
+rean.name <- "ERA-Interim"
+
+cfs.name <- 'ECMWF'  # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
+var.name <- 'sfcWind'  # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed'  # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+forecast.year <- 2014  # starting year of the weekly sequence of the forecasts
+mes <- 1  # starting forecast month (1: January, 2: February, etc.)
+day <- 2  # starting forecast day
+
+# generic path of the forecast system files:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+yr1.hind <- 1994  #1994  # first hindcast year
+yr2.hind <- 2013  #2013  # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32')  # which leadtimes are available in the weekly dataset
+n.members <- 4  # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss')  #c('EnsCorr','FairRpss','FairCrpss','RelDiagr')  # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1:12  # select the month(s) you want to compute the chosen skill scores for
+
+my.pvalue <- 0.05  # in case of computing the EnsCorr, p-value threshold for the significance dots in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3)  # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95)  # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160  # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320  # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE  # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20  # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+######################################  Derived variables  #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R'))  # Color scale used for maps
+n.categ <- 1+length(my.prob)  # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1])  # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day)  # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows to use the load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                      nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+#load(paste0(workdir,'/coordinates.RData'))
+#n.lon <- length(lons)
+#n.lat <- length(lats)
+#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
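+
+# The commented lines above re-center a 0..360 longitude grid on Greenwich; a toy version of the
+# same index arithmetic (illustrative grid of 8 longitudes):
+lons.toy <- seq(0, 359, by = 45)                                         # 0 45 ... 315
+n.toy <- length(lons.toy)
+n.r <- n.toy - ceiling(length(lons.toy[lons.toy < 180 & lons.toy > 0]))  # split point of the grid
+lo.toy <- c(lons.toy[(n.r+1):n.toy] - 360, lons.toy[1:n.r])              # -135 -90 -45 0 45 90 135 180
+lo.toy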
+
+ # load only 1 year of var data from reanalysis to get lat and lon:
+data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+         obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+         nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear')
+# (startdate is assumed to be already defined at this point, e.g. startdate <- 1)
+
+lons <- data.rean$lon
+lats <- data.rean$lat
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+if(mare) n.lat <- 1  # assignment, not comparison: on MareNostrum each chunk processes one latitude row
+
+my.grid<-paste0('r',n.lon,'x',n.lat)
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each hindcast startdate    #
+###############################################################################################
+
+# Run it only once at the beginning to generate the .RData files:
+# Note that forecast data is interpolated to the same resolution as ERA-Interim
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf'  # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52  # choose a sequence of startdates to save their anomalies
+
+for(startdate in my.startdates){  # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean
+  data.rean <- Load(var = var.name, exp = 'ERAintEnsWeek',
+         obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+         nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path) #, grid=my.grid, method='bilinear')
+
+  # dim(data.rean$mod)  # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each point and leadtime)
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,]  # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1)  # repeat the same values over time to be able to calculate the anomalies  #[1,1,20,4,320,640]
+  anom.rean <- data.rean$mod - clim.rean
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1)  # [1,20,4,320,640]
+
+  save(anom.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data_',var.name,'_',rean.name,'/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,clim.rean)
+  gc()
+
+}
+
+
+for(startdate in my.startdates){  # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing forecast anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate it to the same ERA-Interim resolution (512x256)
+  data.hindcast <- Load(var = var.name, exp = 'EnsEcmwfWeekHind',
+         obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)),
+         nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='bilinear')
+
+  #data.hindcast <- Load(var = var.name, exp = 'list(list(path=fields))', obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, storefreq='daily', output = 'lonlat', grid=my.grid, method='bilinear' )
+
+  #Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+
+  # dim(data.hindcast$mod)  # [1,4,20,4,320,640]
+
+  # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,]  # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1)  # repeat the same values over years and members to calc. anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members to compute the ensemble mean  [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+  save(anom.hindcast,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data_',var.name,'_',cfs.name,'/anom_hindcast_mean_startdate',startdate,'.RData'))
+
+  rm(anom.hindcast,anom.hindcast.mean)  # only the objects created in this loop (data.rean/clim.rean/anom.rean were already removed above)
+  gc()
+
+}
+
+
+#########################################################################################
+#       Calculate and save up to all the chosen Skill Scores for a selected period      #
+#########################################################################################
+#
+# Remember that before computing the skill scores, you have to create and save the anomalies by running the preformatting part above once
+#
+
+for(month in veri.month){
+  #month=1 # for the debug
+
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month)  #startdates.monthly[[month]] #c(1:5)  # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month]  # name given to the period of the selected startdates  # i.e:"January"
+  n.startdates <- length(my.startdates)  # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind  # number of total years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon)  # dimensions of the hindcast array
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes, n.lat,n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  cat('Chunk n. ')
+
+  # if we run the script from the terminal with an argument, it only computes the chunk we specify in that argument and saves the results for that chunk.
+  # If not, it loops over all chunks serially and saves the results at the end.
+  if(!mare) {my.chunks <- 1:n.lat} else {my.chunks <- chunk}
+
+  for(c in my.chunks){  # EnsCorr, FairRpss and FairCrpss calculation ('c' is the chunk index used throughout this block)
+    #c=1 # for the debug
+    cat(paste0(c,'/', n.lat,' '))
+    anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, 1, n.lon))  # one latitude row per chunk
+    anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, 1, n.lon))
+    #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last)  # longitude interval where to load data
+
+    # not working on MareNostrum yet:
+    # load 1 file ONLY ONCE to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+    #coord <- Load(var = var.name, exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1)
+    # lat <- coord$lat
+    # lon <- coord$lon
+    # save()
+    # load()
+    #system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = lat[5]))
+
+    for(startdate in my.startdates){
+      pos.startdate<-which(startdate == my.startdates)  # index of the startdate being loaded
+      my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind)  # time interval where to load data
+
+      # Load reanalysis data:
+      load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+      anom.rean <- drop(anom.rean)
+      anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,c,]
+
+      # Load hindcast data:
+      if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,c,]
+        rm(anom.hindcast, anom.rean)
+        gc()
+      }
+
+      if(any(my.score.name=="EnsCorr")){
+        load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData'))  # load ensemble mean hindcast data
+        anom.hindcast.mean <- drop(anom.hindcast.mean)
+        anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean[,,c,]
+        rm(anom.hindcast.mean)
+        gc()
+      }
+
+    }
+
+    if(any(my.score.name=="FairRpss")){
+      if(!boot){
+        #anom.hindcast.chunk.double <- abind(list(anom.hindcast.chunk,anom.hindcast.chunk),along=4)
+        #anom.rean.chunk.double <- abind(list(anom.rean.chunk,anom.rean.chunk),along=3)
+
+        #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+
+        # the prob. for the climatological ensemble can be set without using convert2prob, and it is the same for all points and leadtimes (33% for each tercile):
+        ens <- array(33,c(n.startdates*n.yrs.hind,3))
+
+        # the observed probabilities vary from point to point and for each leadtime:
+        obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob=my.prob)})
+
+        # reshape obs to be able to apply EnsRps(ens, obs) below:
+        obs2 <- InsertDim(obs,1,3)
+        obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+        obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F]  # drop=F is used not to lose the dimension(s) with only 1 element
+
+        my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+
+        my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum)
+        my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum)
+
+        my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+        my.FairRpss[,c,] <- my.FairRpss.chunk
+
+        rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+        gc()
+
+      } else {  # bootstrapping:
+
+        for(b in 1:n.boot){
+          cat(paste0("resampling n. ",b,"/",n.boot))
+          yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+          for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+          for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+
+          my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+          ens <- array(33,c(n.startdates*n.yrs.hind,3))
+          obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob=my.prob)})
+          obs2 <- InsertDim(obs,1,3)
+          obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+          obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F]
+          my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) })
+          my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum)
+          my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum)
+          my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum)
+
+          # NB: chunk$int / chunk$n.int below look like leftovers from an older longitude-chunked version of this draft; they are inconsistent with the integer 'chunk' defined at the top
+          my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk
+          rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+          gc()
+        }
+
+        cat('\n')
+
+        # calculate the percentiles of the skill score:
+        my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+      }
+    }
+
+    if(any(my.score.name=="FairCrpss")){
+      if(!boot){
+        my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+        anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1)  # merge exp and obs together to use apply:
+        my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+        my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+        my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+        my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum)
+
+        my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk
+        rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+        gc()
+
+      } else {  # bootstrapping:
+
+        for(b in 1:n.boot){
+          print(paste0("resampling n. ",b,"/",n.boot))
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + } + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + if(mare == TRUE) { + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + } + + + } # next c (chunk) + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + if(mare == FALSE) { + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + 
+          my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind)  # time interval where to load data
+
+          # Load reanalysis data of next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+          anom.rean <- drop(anom.rean)
+          anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+          # Load hindcast data of next startdate:
+          load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))  # load hindcast data
+          anom.hindcast <- drop(anom.hindcast)
+          anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+          rm(anom.rean,anom.hindcast)
+        }
+
+
+        anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4))  # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+        gc()
+
+        cat('Computing the Reliability Diagram. Please wait... ')
+        ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob=my.prob)  # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+        obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob=my.prob)  # the first n.members*n.yrs.hind elements belong to the first tercile, and so on.
+        ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4))  # type=4 is the only method with sum(prob)=1 !!
+        obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4))  # no parallelization here: it only takes 10s
+
+        if(is.null(int.lat)) int.lat <- 1:n.lat  # if there is no region selected, select the whole world
+        if(is.null(int.lon)) int.lon <- 1:n.lon
+
+        int1 <- 1:(n.startdates*n.yrs.hind)  # time interval of the data of the first tercile
+        my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)  # tercile 1
+
+        int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind)  # time interval of the data of the second tercile
+        my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind)  # time interval of the data of the third tercile
+        my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+        cat('done\n')
+        #print(my.RelDiagr)  # for debugging
+
+        rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+        gc()
+
+      } # next lead
+
+    } # close if on RelDiagr
+
+    # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved)
+    if(!boot){
+      if(any(my.score.name=="FairRpss"))  save(my.FairRpss,  file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+      if(any(my.score.name=="EnsCorr"))   save(my.EnsCorr,   file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'.RData'))
+      if(any(my.score.name=="RelDiagr"))  save(my.RelDiagr, my.PValue, file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name,'.RData'))
+    } else {  # bootstrapping output:
+      if(any(my.score.name=="FairRpss"))  save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+  # If it has finished computing the last chunk, it can load all results in one file, deleting the intermediate output files:
+  if(job.chunk == tot.chunks){
+    # (merging of the chunk files is not implemented yet)
+  }
+
+
+ } # close if on mare
+
+} # next m (month)
+
+
+
+
+
+
+
+
+# all pre-formatting (conversion to anomalies) and post-formatting (visualization) tasks are done below:
+if(mare == FALSE){
+
+###########################################################################################
+#          Visualize and save the score maps for one score and period                     #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1 #9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+  #month=2 # for the debug
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  for(my.score.name.map in my.score.name){
+    #my.score.name.map='EnsCorr' # for the debug
+
+    if(my.score.name.map=="EnsCorr")  { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData'));  my.score<-my.EnsCorr }
+    if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+    if(my.score.name.map=='FairCrpss'){ load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+    if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+    # old green-red palette:
+    # brk.rpss<-c(-1,seq(0,1,by=0.05))
+    # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+    # brk.rps<-c(seq(0,1,by=0.1),10)
+    # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+    # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+    brk.rpss<-c(seq(-1,1,by=0.1))
+    col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+    my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between indexes
+    my.col<-col.rpss
+
+    for(lead in 1:n.leadtimes){
+
+      #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+      #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+      if(my.score.name.map!="RelDiagr"){
+
+        #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+        #           paper="special",width=12,height=7,horizontal=F)
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+        layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+        par(oma=c(1,1,4,1))
+
+        # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+        PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons, lats, sizetit=0.8, brks=my.brk, cols=my.col, axelab=F, filled.continents=FALSE, drawleg=F)
+        my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+        title(my.title,line=0.5,outer=T)
+
+        ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+        if(my.score.name.map=="EnsCorr"){ # add a small grey point if the corr. is significant:
+
+          # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+          my.PValue.rev <- my.PValue
+          my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)]
+          for (x in 1:n.lon) {
+            for (y in 1:n.lat) {
+              if (my.PValue.rev[lead,y,x] < my.pvalue) { # significant at the my.pvalue level
+                text(x = lo[x], y = la[y], ".", cex = .2)
+              }
+            }
+          }
+        }
+        dev.off()
+
+      } # close if on !RelDiagr
+
+      if(my.score.name.map=="RelDiagr") {
+
+        jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+        my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title) + lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue") + #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen") + lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red") + legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + dev.off() + } + + } # next lead + + } # next score + +} # next month + + +############################################################################################### +# Composite map of the four skill scores # +############################################################################################### + +# run it only when you have all the 4 skill scores for each month: + +my.months=9:12 # choose the month(s) to plot and save + +for(month in my.months){ + + startdate.name.map<-my.month[month] # i.e:"January" + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100) + + layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1)) + #layout.show(16) + + for(score in 1:4){ + for(lead in 1:n.leadtimes){ + + img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) + par(mar = rep(0, 4), xaxs='i', yaxs='i' ) + plot.new() + rasterImage(img, 0, 0, 1, 1, interpolate=FALSE) + + } # next lead + + } # next score + + dev.off() + +} # next month + + + +# Dani Map script for the small figure: +#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R + + + +############################################################################################### +# Preformatting: calculate weekly climatologies and anomalies for each forecast startdate # +############################################################################################### +# +# finish!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +# +# the only difference is that anomalies are computed using climatologies from the hincast data, +# and not from the forecast data. 
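+# As a minimal sketch of the climatology/anomaly step coded below (toy dimensions, not the real
+# Load() output; kept commented so it does not run with the script): the climatology is the mean
+# over members and years, repeated with InsertDim() so it can be subtracted elementwise.
+#toy <- array(rnorm(4*20*3*2), c(4,20,3,2))                # [member, year, leadtime, point]
+#toy.clim <- apply(toy, c(3,4), mean)                      # climatology: [leadtime, point]
+#toy.clim.rep <- InsertDim(InsertDim(toy.clim,1,20),1,4)   # repeat it back to [member, year, leadtime, point]
+#toy.anom <- toy - toy.clim.rep                            # anomalies: ~0 mean at each leadtime/point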
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
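+# Note on convert2prob (easyVerification): given the quantile thresholds in my.prob, it turns
+# values into per-category counts. For the observation vector each row should be one-hot (a
+# single 1 in the observed tercile), while for the N x M hindcast each row should sum to M.
+# Quick sanity checks, in the style of the commented checks below:
+#all(rowSums(convert2prob(anom.rean, prob=my.prob)) == 1)
+#all(rowSums(convert2prob(anom.hind, prob=my.prob)) == M)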
+obs<-convert2prob(anom.rean, prob=my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind, prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement from the hindcast
+ens.clim<-convert2prob(anom.hind.clim, prob=my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one extra column
+ens.clim.rean<-convert2prob(anom.rean.clim, prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85), plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it has the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it has the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it has the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it is different both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without fcst.ref, veriApply builds its own climatological forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
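+# (the "Fair" versions of the scores apply the finite-ensemble correction of Ferro (2014): they
+# estimate the score that an infinite ensemble drawn from the same distribution would obtain,
+# so on average FairRps <= Rps and ensembles of different sizes can be compared fairly)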
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it is different both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                Other analysis                                  #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in the case of forecasts, or the number of years in the case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4]  tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into one!
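+# A minimal sketch of that aperm trick with toy dimensions (commented, like the example above):
+# reordering with aperm() and re-wrapping with array() fuses trailing dimensions, because
+# array() refills the data in column-major order.
+#a  <- array(1:24, c(2,3,4))                # dims [2,3,4]
+#b  <- array(aperm(a, c(3,1,2)), c(4,2,3))  # reordered to [4,2,3]
+#b2 <- array(aperm(a, c(3,1,2)), c(4,6))    # same data, last two dims merged into one of length 6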
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
+
+# when you have for each leadtime all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+        obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+        leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the reanalysis climatology loading only 1 file every 4 (it's quick, but it needs all the files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate every 4, plus the last 3 startdates, which must be computed individually because they do not form a complete set of 4
+
+clim.ERAI<-array(NA,c(length(sdates.seq),4,n.lat,n.lon)) # preallocate the climatology (assuming the usual [startdate, leadtime, lat, lon] shape)
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+        obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+        nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,]  # startdate   leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,]  #  (week)     1 2 3 4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,]  #    1        a b c d   <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,]  #    2        b c d e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,]  #    3        c d e f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,]  #    4        d e f g
+  }                                                        #    5        e f g h
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
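+    # mirror of the block above: the calendar week is startdate + leadtime - 1, so leadtime l of
+    # the loaded startdate covers the same week as leadtime l+k of startdate-k; each loaded
+    # startdate can therefore also back-fill the later leadtimes of up to 3 preceding startdates.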
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
+
+
+
+#######################################################################################
+#                                Raster Animations                                    #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)                         # k has dims [3, height, width]: one RGB triplet per pixel
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE) # one pixel per row, one column per RGB channel
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)      # cluster the pixel colors into nclust groups
+  new <- clust$centers[clust$cluster,]             # replace each pixel color with its cluster center
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                   ProgressBar                                       #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
+
+
+#######################################################################################
+#                          Load large datasets with ff                                #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # note: the 'list' argument of ffload() takes the object names as a character vector
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # terciles
+close(anom.hind); rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper (hypothetical name and argument) that reloads the ff data from disk before computing the skill score:
+veriApplyBig.fromdisk <- function(my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind, file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! 
(you must use do.call, see WT_drivers_v2.R) +ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross)) +delete(pred_Cal_cross_ff) +rm(pred_Cal_cross_ff) + + + +######################################################################################### +# Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff # +######################################################################################### + +bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays in the disk instead than in the RAM) to remove the memory limit + +for(month in veri.month){ + month=1 # for debug + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name=my.month[month] # name given to the period of the selected startdates, i.e:"January" + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + print(paste0("Computing Skill Scores for ",startdate.name)) + + if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array + + if(boot) mod<-1 else mod=0 # in case of the boostrap, anom.hind.big includes also the reanalysis data as additional last member + + if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff")) + if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon)) + + for(startdate in my.startdates){ + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded +1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + if(!boot) { + anom.rean<-drop(anom.rean) + anom.rean.big[my.time.interv,,,] <- anom.rean + } + + if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){ + load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + anom.hindcast<-drop(anom.hindcast) + + if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast + if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1) + + rm(anom.hindcast, anom.rean) + } + gc() + } + + if(any(my.score.name=="FairRpss" || my.score.name=="FairCrpss")){ + if(!boot) { + if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + } else { + # bootstrapping: + my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean<-drop(anom.rean) + #anom.rean.big[my.time.interv,,,] <- anom.rean + + my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)) + anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, + output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + #anom.hindcast<-drop(anom.hindcast) + + anom.hind.big[,my.time.interv,,,] <- anom.hindcast + rm(anom.hindcast) + } + + my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + + +######################################################################################### +# Alternative way to speed up Load() # +######################################################################################### + +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset + var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to find if it is WTs-driven + var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + + # Forecast system list: + S4 <- list(path = ...) + CFSv2 <- list(path = ...) + + # Reanalysis list: + ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + JRA55 <- list(path = ...) 
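+ # Load() fills the $VAR_NAME$ / $STORE_FREQ$ / $YEAR$ / $MONTH$ placeholders of these path
+ # templates at load time; the gsub() calls below redo the same substitution by hand, just to
+ # recover the data directory and list the files available inside it.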
+
+
+
+
+ # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+ exp.source <- NULL
+ obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+ # Select one variable and its frequency:
+ var.name <- 'sfcWind'
+ store.freq <- 'daily'
+
+ # Extract only the directory path of the forecast and reanalysis:
+ obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+ obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+ obs.source4 <- strsplit(obs.source3,'/')
+ obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+ obs.source6 <- list.files(path = obs.source5)
+
+ system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+ system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+ y <- 1990
+ var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+} # close if on mare
+
+
diff --git a/old/SkillScores_v9.R b/old/SkillScores_v9.R
new file mode 100644
index 0000000000000000000000000000000000000000..9e7d3ea91f8a45aeb4ba4f029bb53ed58adb1749
--- /dev/null
+++ b/old/SkillScores_v9.R
@@ -0,0 +1,1251 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v9.R
+#
+# e.g., to run only the chunk number 3, write:
+#
+# Rscript SkillScores_v9.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if the computational speed is not a problem.
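+#
+# For example, to launch several jobs in parallel from bash (assuming the argument selects the
+# month, as in the MareNostrum settings below, and that enough processors are free):
+#
+# for m in {1..12}; do Rscript SkillScores_v9.R $m & done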
+ +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(easyVerification) # for veriApply() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) print("Running the script in a sequential way") +if(length(args) > 1) stop("Only one argument is required") + +mare <- ifelse(length(args) == 1 ,TRUE, FALSE) # set variable 'mare' to TRUE if we are running the script in MareNostrum, FALSE otherwise + +if(mare) my.month <- as.integer(args[1]) # number of the month to run in this script (if mare == TRUE) + +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE/Subestacional", "/gpfs/projects/bsc32/bsc32842/results") + +# path of the weekly var rean data: +rean.dir <- '/esarchive/old-files/recon_ecmwf_erainterim/weekly_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.name <- "ERA-Interim" + +# path of the monthly forecast system files: +forecast.year <- 2014 # starting year of the weekly sequence of the forecasts +forecast.dir <- paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +mes <- 1 # starting forecast month (1: january, 2: february, etc.) 
+day <- 2 # starting forecast day
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+pref.rean <- FALSE # If TRUE, instead of measuring the skill scores, it computes the anomalies and climatologies both for reanalysis and forecast.
+                   # Do it once, the first time you evaluate a new forecast system or use a new reanalysis; then set it back to FALSE to measure the skill
+pref.forecasts <- FALSE
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment these rows, using the load() call below instead:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                      nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+#load(paste0(workdir,'/coordinates.RData'))
+#n.lon <- length(lons)
+#n.lat <- length(lats)
+#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+# load only 1 year of weekly var rean data from the reanalysis, just to get lat and lon:
+data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)),
+                  obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)),
+                  nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+lons <- data.rean$lon
+lats <- data.rean$lat
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#if(mare) n.lat == 1 # (no-op: this comparison does not modify n.lat)
+
+my.grid<-paste0('r',n.lon,'x',n.lat)
+
+#file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+#########################################################################################
+#      Calculate and save up to all the chosen Skill Scores for a selected period       #
+#########################################################################################
+# if we run the script from the terminal with an argument, it only computes the month specified by that argument and saves the results for that month
+if(mare) veri.month <- my.month
+
+for(month in veri.month){
+  #month=1 # for the debug
+
+  # select the startdates (weeks) you want to compute:
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5)
+
+  startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  anom.rean.chunk <- anom.hindcast.mean.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.hindcast.chunk <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+  for(startdate in my.startdates){
+    pos.startdate <- which(startdate == my.startdates) # count of the number of startdates already loaded, plus 1
+    my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+    # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean
+    data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)),
+                      obs = NULL,
sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=1) + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean <- apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average of all years for each leadtime and pixel + clim.rean <- InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + anom.rean <- data.rean$mod - clim.rean + #anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + rm(data.rean, clim.rean) + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,,] <- anom.rean + + # Load hindcast data: + print(paste0('Computing forecast anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate them to the same ERA-Interim resolution (512x256) + data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast <- apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average of all years and members for each leadtime and pixel + clim.hindcast <- InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. 
anomalies + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast + + rm(anom.hindcast, anom.rean) + gc() + + if(any(my.score.name=="EnsCorr")){ + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data + + anom.hindcast.mean <- drop(anom.hindcast.mean) + anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean + + rm(anom.hindcast.mean) + gc() + } + + } + + if(any(my.score.name=="FairRpss")){ + + #anom.hindcast.chunk.double <- abind(list(anom.hindcast.chunk,anom.hindcast.chunk),along=4) + #anom.rean.chunk.double <- abind(list(anom.rean.chunk,anom.rean.chunk),along=3) + + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to loose the dimension(s) with only 1 element + + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + #my.FairRpss[,c,] <- my.FairRpss.chunk + + #rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + #for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + #for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + + my.FairRps.sampled <- my.FairRps.chunk + my.Rps.clim.sampled <- my.Rps.clim.chunk + + for(y in 1:n.yrs) my.FairRps.sampled[y,,,] <- my.FairRps.chunk[yrs.sampled[y],,,] + for(y in 1:n.yrs) my.Rps.clim.sampled[y,,,] <- my.Rps.clim.chunk[yrs.sampled[y],,,] + + #my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + #ens <- array(33,c(n.startdates*n.yrs.hind,3)) + #obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + #obs2 <- InsertDim(obs,1,3) + #obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + #obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] + #my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + } + + if(any(my.score.name=="FairCrpss")){ + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + # bootstrapping: + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + #if(mare == TRUE) { + # if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + # if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + # if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + #} + + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load 
data
+
+        # Load reanalysis data of next startdate:
+        load(paste0(work.dir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+        anom.rean <- drop(anom.rean)
+        anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+        # Load hindcast data of next startdate:
+        load(paste0(work.dir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+        anom.hindcast <- drop(anom.hindcast)
+        anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+        rm(anom.rean,anom.hindcast)
+      }
+
+      anom.hindcast.chunk.perm <- aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years first (needed for convert2prob)
+      gc()
+
+      cat('Computing the Reliability Diagram. Please wait... ')
+      ens.chunk <- apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob=my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+      obs.chunk <- apply(anom.rean.chunk, c(2,3), convert2prob, prob=my.prob) # the first n.startdates*n.yrs.hind elements belong to the first tercile, and so on
+      ens.chunk.prob <- apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+      obs.chunk.prob <- apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes ~10s
+
+      if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, use the whole world
+      if(is.null(int.lon)) int.lon <- 1:n.lon
+
+      int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+      my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+      int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+      my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+      int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+      my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+      cat('done\n')
+      #print(my.RelDiagr) # for debugging
+
+      rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+      gc()
+
+    } # next lead
+
+  } # close if on RelDiagr
+
+  # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved):
+  # note that all the scores end up together in a single file here
+  save(my.FairRpss.chunk, my.FairCrpss.chunk, my.EnsCorr.chunk, my.RelDiagr, my.FairRpssBoot, my.FairCrpssBoot, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+  #if(any(my.score.name=="FairRpss")) save(my.FairRpss.chunk, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData'))
+  #if(any(my.score.name=="FairCrpss")) save(my.FairCrpss.chunk, file=paste0(work.dir,'/',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  #if(any(my.score.name=="EnsCorr")) save(my.EnsCorr.chunk, file=paste0(work.dir,'/',var.name,'/EnsCorr_',startdate.name,'.RData'))
+  #if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, my.PValue, file=paste0(work.dir,'/',var.name,'/RelDiagr_',startdate.name,'.RData'))
+
+  # bootstrapping output:
+  #if(any(my.score.name=="FairRpss")) 
save(my.FairRpssBoot, my.FairRpssConf, file=paste0(work.dir,'/',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + #if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(work.dir,'/',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + +} # next m (month) + + + + +########################################################################################### +# Visualize and save the score maps for one score and period # +########################################################################################### + +# run it only after computing and saving all the skill score data: + +my.months=1:12 #9:12 # choose the month(s) to plot and save + + +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') # choose one or more skills scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr + +for(month in my.months){ + #month=1 # for the debug + startdate.name.map<-my.month[month] # i.e:"January" + + for(my.score.name.map in my.score.name){ + #my.score.name.map='EnsCorr' # for the debug + + if(my.score.name.map=="EnsCorr") { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr } + if(my.score.name.map=='FairRpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss } + if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss } + if(my.score.name.map=="RelDiagr") { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr } + + # old green-red palette: + # brk.rpss<-c(-1,seq(0,1,by=0.05)) + # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + # brk.rps<-c(seq(0,1,by=0.1),10) + # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss<-c(seq(-1,1,by=0.1)) + col.rpss<-colorRampPalette(col)(length(brk.rpss)-1) + + my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differenciate between indexes + my.col<-col.rpss + + for(lead in 1:n.leadtimes){ + + #my.title<-paste0(my.score.name.map,' of ',cfs.name,' + #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map!="RelDiagr"){ + + #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + # paper="special",width=12,height=7,horizontal=F) + + jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600) + + layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2)) + par(oma=c(1,1,4,1)) + + # lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,sizetit=0.8, brks=my.brk, cols=my.col,axelab=F, filled.continents=FALSE, drawleg=F) + my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.')
+      title(my.title,line=0.5,outer=T)
+
+      ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+      if(my.score.name.map=="EnsCorr"){ # add a small grey point where the correlation is significant:
+
+        # rearrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+        my.PValue.rev <- my.PValue
+        my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] # use the current leadtime, not a leftover loop index
+        for (x in 1:n.lon) {
+          for (y in 1:n.lat) {
+            if (my.PValue.rev[lead,y,x] < my.pvalue) { # draw a dot only where the p-value is below the chosen significance level
+              text(x = lo[x], y = la[y], ".", cex = .2)
+            }
+          }
+        }
+      }
+      dev.off()
+
+    } # close if on !RelDiagr
+
+    if(my.score.name.map=="RelDiagr") {
+
+      jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+      my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+      plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+      lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+      #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+      lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+      legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+      dev.off()
+    }
+
+  } # next lead
+
+ } # next score
+
+} # next month
+
+
+###############################################################################################
+#                        Composite map of the four skill scores                               #
+###############################################################################################
+
+# run it only when all 4 skill scores are available for each month:
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # i.e: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg')) # readJPEG() needs the 'jpeg' package
+      par(mar = rep(0, 4), xaxs='i', yaxs='i')
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+
+# Dani's map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# TODO: finish this section! The only difference is that the anomalies are computed using
+# climatologies from the hindcast data, and not from the forecast data.
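+#
+# The core operation of this section is anomaly = data - climatology; the nested InsertDim()
+# calls below only replicate the climatology along the member/year dimensions so that the
+# subtraction works elementwise. A minimal sketch of the same idea, with made-up toy sizes:
+#
+# x <- array(rnorm(2*3*4), c(2,3,4))                 # [member, year, gridpoint]
+# clim <- apply(x, 3, mean)                          # climatology of each gridpoint
+# anom <- x - aperm(array(clim, c(4,2,3)), c(2,3,1)) # replicate clim back to [2,3,4]; same result as sweep(x, 3, clim)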
+
+# Run it only once at the beginning:
+
+file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies
+
+for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence:
+  print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+  data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                        obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                        nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  # dim(data.hindcast$mod) # [1,4,20,4,320,640]
+
+  # Mean of the 4 hindcast members and of the 20 years (for each grid point and leadtime):
+  clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel
+  clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # replicate the same values along years and members to calculate the anomalies
+
+  anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+  rm(data.hindcast,clim.hindcast)
+  gc()
+
+  # average the anomalies over the 4 hindcast members [1,20,4,320,640]
+  anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+  my.grid<-paste0('r',n.lon,'x',n.lat)
+  data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+  # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+  # Mean of the 20 years (for each grid point and leadtime):
+  clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+  clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # replicate the same values to be able to calculate the anomalies #[1,1,20,4,320,640]
+
+  anom.rean <- data.rean$mod - clim.rean
+
+  anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+  #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData'))
+
+  save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData'))
+  save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData'))
+  save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData'))
+  save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData'))
+
+  rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean)
+  gc()
+
+}
+
+
+
+
+
+
+
+##################################################################################
+#                                   Toy Model                                    #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification) # EnsRps, FairRps, EnsCrps, ReliabilityDiagram
+library(easyVerification) # veriApply, convert2prob, count2prob
+
+my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually the terciles)
+
+N=80 # number of years (default; overwritten by the loop below)
+M=14 # number of members (default; overwritten by the loop below)
+
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
+
+obs<-convert2prob(anom.rean,prob=my.prob)
+#mean(obs[,1]) # check that it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample of the hindcast, without replacement
+ens.clim<-convert2prob(anom.hind.clim,prob=my.prob)
+#mean(ens.clim[,1]) # check that it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on the observations instead (as applied by veriApply via convert2prob when the option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one more column
+ens.clim.rean<-convert2prob(anom.rean.clim,prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the probabilities of each year do not sum to 100%!!! (see the check after the Rpss block below)
+y<-count2prob(obs) # we should add the option type=4; if not, the probabilities of each year do not sum to 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it gives the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once that it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using its internal climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # it always comes out ~0.45 for every hindcast; it differs both from Rps.clim and from the N -> +oo limit of Rps.clim (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without providing the clim. forecast
+
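+# quick check of the type=4 remark above (a sketch, reusing the 'ens' counts of this toy model;
+# 'p4' is just a throwaway name): with count2prob(..., type=4) the returned probabilities are
+# plain relative frequencies, so they sum exactly to 1 within each year, which the default
+# type does not guarantee:
+p4 <- count2prob(ens, type=4)
+stopifnot(all(abs(rowSums(p4) - 1) < 1e-10))
+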
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its internal climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # it always comes out ~0.55; it differs both from FairRps.clim and from the N -> +oo limit of FairRps.clim (0.444)
+
+cat(paste0('n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) +cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 startdates that must be computed individually because they are not a complete set of 4 + +for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years + data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + + # the intermediate startdate between startdate week i and startdate week i+4 are repeated: + if(startdate <= 49){ + clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week) + clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4 + clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdate 1 and 5 are necessary to calculate all startdates between 1 and 5 + clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e + clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f + clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g + } # 5 e f g h + + if(startdate > 1 && startdate <=49){ # fill the previous startdates: + 
clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,] + clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,] + } + +} + + + + + # You can make cor much faster by stripping away all the error checking code and calling the internal c function directly: + # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })}) + # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })}) + C_cor<-get("C_cor", asNamespace("stats")) + test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE) + my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=nom.rean.chunk[,i,j,k], na.method=2, FALSE) + + + +####################################################################################### +# Raster Animations # +####################################################################################### + +library(png) +library(animation) + + +clustPNG <- function(pngobj, nclust = 3){ + + j <- pngobj + # If there is an alpha component remove it... + # might be better to just adjust the mat matrix + if(dim(j)[3] > 3){ + j <- j[,,1:3] + } + k <- apply(j, c(1,2), c) + mat <- matrix(unlist(k), ncol = 3, byrow = TRUE) + grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2]) + clust <- kmeans(mat, nclust, iter.max = 20) + new <- clust$centers[clust$cluster,] + + for(i in 1:3){ + tmp <- matrix(new[,i], nrow = dim(j)[1]) + j[,,i] <- tmp + } + + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) +} + +makeAni <- function(file, n = 10){ + j <- readPNG(file) + for(i in 1:n){ + clustPNG(j, i) + mtext(paste("Number of clusters:", i)) + } + + j <- readPNG(file) + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) + mtext("True Picture") +} + +file <- "~/Dropbox/Photos/ClassyDogCropPng.png" +n <- 20 +saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5)) + + + + +####################################################################################### +# ProgressBar # +####################################################################################### + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + #cat(i,"\r") + # So you can use message() like this, thanks to Sharpie's + # comment to use appendLF=FALSE. 
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
+
+
+#######################################################################################
+#                           Load large datasets with ff                               #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch) # store the array on disk instead of in RAM
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # note: the 'list' argument takes the object names as strings
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # terciles, as in my.prob
+close(anom.hind); rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # the real data wouldn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper that loads the ff data from disk before computing the score
+# (illustrative arguments; a name different from veriApplyBig avoids masking the sourced function):
+veriApplyBig.fromdisk <- function(score.name, my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig(score.name, fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+# two equivalent ways of creating the ff array: directly with ff(), or converting an in-memory array with as.ff():
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # the real data wouldn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # kept in memory: it is much smaller than the hindcast
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the line below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss with the ff package (storing the arrays on disk rather than in RAM) to remove the memory limit
+
+for(month in veri.month){
+  #month=1 # for the debug only (leave it commented, or the loop always computes the same month)
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # the reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded, +1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot)  anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){ # any() must wrap each comparison separately, or only the first element is tested
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
my.startdates){ + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean<-drop(anom.rean) + #anom.rean.big[my.time.interv,,,] <- anom.rean + + my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)) + anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, + output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data + #anom.hindcast<-drop(anom.hindcast) + + anom.hind.big[,my.time.interv,,,] <- anom.hindcast + rm(anom.hindcast) + } + + my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + + save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + + +######################################################################################### +# Alternative way to speed up Load() # +######################################################################################### + +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset + var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to find if it is WTs-driven + var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + + # Forecast system list: + S4 <- list(path = ...) + CFSv2 <- list(path = ...) + + # Reanalysis list: + ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + JRA55 <- list(path = ...) 
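+
+  # Sketch of how the $VAR_NAME$/$STORE_FREQ$ wildcards of the path templates above can be
+  # expanded by hand (the same substitutions the gsub() calls below perform step by step);
+  # 'fill.template' is only an illustrative helper, not a s2dverification function:
+  fill.template <- function(template, var.name, store.freq){
+    out <- gsub("\\$VAR_NAME\\$", var.name, template)
+    gsub("\\$STORE_FREQ\\$", store.freq, out)
+  }
+  # e.g.: fill.template(ERAint$path, 'sfcWind', 'daily')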
+
+
+
+
+  # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+  exp.source <- NULL
+  obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+  # Select one variable and its frequency:
+  var.name <- 'sfcWind'
+  store.freq <- 'daily'
+
+  # Extract only the directory path of the forecast and reanalysis:
+  obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+  obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+  obs.source4 <- strsplit(obs.source3,'/')
+  obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+  obs.source6 <- list.files(path = obs.source5)
+
+  # use the lowercase R variables here, not the $...$ wildcards of the path template:
+  system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+  system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+  y <- 1990
+  var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
+
+
+} # close if on mare
+
+
diff --git a/old/SkillScores_v9.R~ b/old/SkillScores_v9.R~
new file mode 100644
index 0000000000000000000000000000000000000000..db34ac9b2924ea49393753aeee1984ea6f8cae68
--- /dev/null
+++ b/old/SkillScores_v9.R~
@@ -0,0 +1,1253 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+# you can run it from the terminal with the syntax:
+#
+# Rscript SkillScores_v9.R
+#
+# i.e., to run only the chunk number 3, write:
+#
+# Rscript SkillScores_v9.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
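+#
+# For example, to run all the monthly chunks in parallel from the bash (one process per month),
+# something like the following should work (illustrative command; adapt the script name and
+# the number of chunks to your case):
+#
+# for m in {1..12}; do Rscript SkillScores_v9.R $m & done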
+ +rm(list=ls()) + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(SpecsVerification) # for skill scores +library(s2dverification) # for Load() function +library(easyVerification) # for veriApply() function +library(abind) # for merging arrays +source('/home/Earth/ncortesi/scripts/Rfunctions.R') + +########################################################################################## +# MareNostrum settings # +########################################################################################## + +args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with whom the script was run from the terminal + +if(length(args) == 0) print("Running the script in a sequential way") +if(length(args) > 1) stop("Only one argument is required") + +mare <- ifelse(length(args) == 1 ,TRUE, FALSE) # set variable 'mare' to TRUE if we are running the script in MareNostrum, FALSE otherwise + +if(mare) my.month <- as.integer(args[1]) # number of the month to run in this script (if mare == TRUE) + +########################################################################################## +# User's settings # +########################################################################################## +#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')} + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +work.dir <- ifelse(!mare, "/scratch/Earth/ncortesi/RESILIENCE/Subestacional", "/gpfs/projects/bsc32/bsc32842/results") + +# path of the weekly var rean data: +rean.dir <- '/esarchive/old-files/recon_ecmwf_erainterim/weekly_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +rean.name <- "ERA-Interim" + +# path of the monthly forecast system files: +forecast.year <- 2014 # starting year of the weekly sequence of the forecasts +forecast.dir <- paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc') + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl' +var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP' + +mes <- 1 # starting forecast month (1: january, 2: february, etc.) 
+day <- 2 # starting forecast day + +yr1.hind <- 1994 #1994 # first hindcast year +yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1) + +leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset +n.members <- 4 # number of hindcast members + +my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') # choose one or more skills scores to compute between EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr + +veri.month <- 1:12 # select the month(s) you want to compute the chosen skill scores + +my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps +my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) +conf.level <- c(0.05, 0.95)# percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed) +int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) +int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL) + +pref.rean <- FALSE # If TRUE, instead of measuring the skill scores, it computes the anomalies and climatologies both for reanalisys and forecast. + # To do it once the first time you evaluate a new forecast system or you use a new reanalysis, and then set it to FALSE to measure the skill +pref.forecasts <- FALSE + +boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores +n.boot <- 20 # number of resamples considered in the bootstrapping + +# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values in objects la y lo) +my.lat <- 55.3197120 +my.lon <- 0.5625 + +###################################### Derived variables ############################################################################# + +#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps +n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used) + +# blue-yellow-red colors: +#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1])) +#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors +col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519") + +sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the thursday of the year +n.leadtimes <- length(leadtime.week) +n.yrs.hind <- yr2.hind-yr1.hind+1 +my.month.short <- substr(my.month,1,3) + +## extract geographic coordinates; do only ONCE for each preducton system and then save the lat/lon in a coordinates.RData and comment these rows to use function load() below: +#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind', +# obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + 
+#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +#load(paste0(workdir,'/coordinates.RData')) +#n.lon <- length(lons) +#n.lat <- length(lats) +#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) +#la<-rev(lats) +#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + +# load only 1 year of weekly var rean data from reanalysis to get lat and lon: +data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)), + nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1) + +lons <- data.rean$lon +lats <- data.rean$lat +n.lon <- length(lons) +n.lat <- length(lats) +n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) +if(mare) n.lat == 1 + +my.grid<-paste0('r',n.lon,'x',n.lat) + +#file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +######################################################################################### +# Calculate and save up to all the chosen Skill Scores for a selected period # +######################################################################################### +# if we run the script from the terminal with an argument, it only computes the month we specify in the second argument and save the results for that month +if(mare) veri.month <- my.month + +for(month in veri.month){ + #month=1 # for the debug + + # select the startdates (weeks) you want to compute: + my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) + + startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates <- length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + + print(paste0("Computing Skill Scores for ",startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + + my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes, n.lat,n.lon)) + if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + + + anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data + + # not working on MareNostrum still: + # load 1 file ONLY ONCE to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed: + #coord <- Load(var = var.name, exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1) + # lat <- coord$lat + # lon <- coord$lon + # save() + # load() + #system.time(a<-Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2010:2013,'0102'), leadtimemin = 1, leadtimemax=3, output = 'lonlat', nprocs=1, latmin = lat[5], latmax = 
lat[5])) + + for(startdate in my.startdates){ + pos.startdate <- which(startdate == my.startdates) # count of the number of stardates already loaded+1 + my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean + data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)), + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=1) + + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean <- apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average of all years for each leadtime and pixel + clim.rean <- InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + anom.rean <- data.rean$mod - clim.rean + #anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + rm(data.rean, clim.rean) + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) + #anom.rean <- drop(anom.rean) + anom.rean.chunk[my.time.interv,,,] <- anom.rean + + # Load hindcast data: + print(paste0('Computing forecast anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate them to the same ERA-Interim resolution (512x256) + data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)), + obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast <- apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average of all years and members for each leadtime and pixel + clim.hindcast <- InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. 
anomalies + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members to compute the ensemble mean [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + gc() + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) + anom.hindcast <- drop(anom.hindcast) + anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast + + rm(anom.hindcast, anom.rean) + gc() + + if(any(my.score.name=="EnsCorr")){ + + #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data + + anom.hindcast.mean <- drop(anom.hindcast.mean) + anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean + + rm(anom.hindcast.mean) + gc() + } + + } + + if(any(my.score.name=="FairRpss")){ + + #anom.hindcast.chunk.double <- abind(list(anom.hindcast.chunk,anom.hindcast.chunk),along=4) + #anom.rean.chunk.double <- abind(list(anom.rean.chunk,anom.rean.chunk),along=3) + + #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=1) + #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + + # the prob.for the climatological ensemble can be set without using convert2prob and they are the same for all points and leadtimes (33% for each tercile): + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + + # the observed probabilities vary from point to point and for each leadtime: + obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + + # reshape obs to be able to apply EnsRps(ens, obs) below: + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to loose the dimension(s) with only 1 element + + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + + my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum) + + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + #my.FairRpss[,c,] <- my.FairRpss.chunk + + #rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + + # bootstrapping: + + for(b in 1:n.boot){ + cat(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + ens <- array(33,c(n.startdates*n.yrs.hind,3)) + obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob <- my.prob)}) + obs2 <- InsertDim(obs,1,3) + obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,] + obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] + my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x)) }) + my.FairRps.chunk.sum <- apply(my.FairRps.chunk,c(2,3,4), sum) + my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk,c(2,3,4), sum) + my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum/my.Rps.clim.chunk.sum) + + my.FairRpssBoot[b,,,chunk$int[[c]]] <- my.FairRpss.chunk + rm(my.FairRpss.chunk, ens, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum) + gc() + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + } + + if(any(my.score.name=="FairCrpss")){ + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk,anom.rean.chunk, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpss[,,chunk$int[[c]]]<-my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + # bootstrapping: + + for(b in 1:n.boot){ + print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,] + for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,] + + my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) + anom.all.chunk <- abind(anom.hindcast.chunk.sampled, anom.rean.chunk.sampled, along=1) # merge exp and obs together to use apply: + my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])}) + my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum) + my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum) + my.FairCrpss.chunk <- 1-(my.FairCrps.chunk.sum/my.Crps.clim.chunk.sum) + + my.FairCrpssBoot[b,,,chunk$int[[c]]] <- my.FairCrpss.chunk + rm(my.FairCrpss.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum) + gc() + + } + + cat('\n') + + # calculate the percentiles of the skill score: + my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8)) + + + } + + if(any(my.score.name=="EnsCorr")){ + # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim): + my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, chunk$n.int[[c]])) + + for(i in 1:n.leadtimes){ + for(j in 1:n.lat){ + for(k in 1:chunk$n.int[[c]]){ + my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs") + # option 'na.method' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete") + + my.PValue.chunk[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value + + } + } + } + + my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk + my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk + + rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk) + } + + rm(anom.rean.chunk) + gc() + #mem() + + #if(mare == TRUE) { + # if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'_chunk_',c,'.RData')) + # if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'_chunk_',c'.RData')) + # if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name,'_chunk_',c'.RData')) + #} + + + # the Reliability diagram cannot be computed splitting the data in chunk, so it is computed only if the script is run from the R interface or with no arguments: + + if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but can be split for leadtime instead: + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + cat(paste0('leadtime n.: ',lead,'/',n.leadtimes)) + anom.rean.chunk <- array(NA,c(n.startdates*n.yrs.hind,n.lat,n.lon)) + anom.hindcast.chunk <- array(NA,c(n.members,n.startdates*n.yrs.hind,n.lat,n.lon)) + cat(' startdate: ') + i=0 + + for(startdate in my.startdates){ + i=i+1 + cat(paste0(i,'/',length(my.startdates),' ')) + + pos.startdate<-which(startdate==my.startdates) # count of the number of stardates already loaded+1 + my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load 
data

+    # Load reanalysis data of next startdate:
+    load(paste0(work.dir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    anom.rean <- drop(anom.rean)
+    anom.rean.chunk[my.time.interv,,] <- anom.rean[,lead,,]
+
+    # Load hindcast data of next startdate:
+    load(paste0(work.dir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+    anom.hindcast <- drop(anom.hindcast)
+    anom.hindcast.chunk[,my.time.interv,,] <- anom.hindcast[,,lead,,]
+
+    rm(anom.rean,anom.hindcast)
+   }
+
+   anom.hindcast.chunk.perm<-aperm(anom.hindcast.chunk,c(2,1,3,4)) # swap the first two dimensions to put the years first (needed for convert2prob)
+   gc()
+
+   cat('Computing the Reliability Diagram. Please wait... ')
+   ens.chunk<-apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob<-my.prob) # its dimensions are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+   obs.chunk<-apply(anom.rean.chunk,c(2,3), convert2prob, prob<-my.prob) # the first n.startdates*n.yrs.hind elements belong to the first tercile, and so on
+   ens.chunk.prob<-apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
+   obs.chunk.prob<-apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # no parallelization here: it only takes ~10s
+
+   if(is.null(int.lat)) int.lat <- 1:n.lat # if no region is selected, take the whole world
+   if(is.null(int.lon)) int.lon <- 1:n.lon
+
+   int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+   my.RelDiagr[[lead]]<-ReliabilityDiagram(ens.chunk.prob[int1,int.lat,int.lon], obs.chunk.prob[int1,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F) # tercile 1
+
+   int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+   my.RelDiagr[[n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int2,int.lat,int.lon], obs.chunk.prob[int2,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+   int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+   my.RelDiagr[[2*n.leadtimes+lead]]<-ReliabilityDiagram(ens.chunk.prob[int3,int.lat,int.lon], obs.chunk.prob[int3,int.lat,int.lon], nboot=0, plot=FALSE, plot.refin=F)
+
+   cat('done\n')
+   #print(my.RelDiagr) # for debugging
+
+   rm(anom.rean.chunk,anom.hindcast.chunk,ens.chunk,obs.chunk,ens.chunk.prob,obs.chunk.prob)
+   gc()
+
+  } # next lead
+
+ } # close if on RelDiagr
+
+ # we can save the results for all chunks only if mare == FALSE (if it is TRUE, we have the results for 1 chunk only, and they have already been saved):
+
+ if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData'))
+ if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(work.dir,'/',var.name,'/FairCrpss_',startdate.name,'.RData'))
+ if(any(my.score.name=="EnsCorr")) save(my.EnsCorr, my.PValue, file=paste0(work.dir,'/',var.name,'/EnsCorr_',startdate.name,'.RData')) # my.PValue goes with my.EnsCorr because the map section below loads both from this file
+ if(any(my.score.name=="RelDiagr")) save(my.RelDiagr, file=paste0(work.dir,'/',var.name,'/RelDiagr_',startdate.name,'.RData'))
+
+ # bootstrapping output:
+ if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(work.dir,'/',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+ if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(work.dir,'/',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
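+ # How the bootstrap confidence bounds saved above are obtained (sketch with
+ # made-up numbers, not part of the original computation): 'quantile(..., type=8)'
+ # over the n.boot resampled scores gives the lower/upper limits, e.g.:
+ # boot.scores <- rnorm(500, mean=0.2, sd=0.05)
+ # quantile(boot.scores, probs=c(0.05,0.95), type=8) # approx. 0.12 and 0.28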
+
+} # next m (month)
+
+
+###########################################################################################
+#            Visualize and save the score maps for one score and period                   #
+###########################################################################################
+
+# run it only after computing and saving all the skill score data:
+
+my.months=1:12 #9:12 # choose the month(s) to plot and save
+
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr') # choose one or more skill scores to plot among EnsCorr, FairRpss, FairCrpss and/or RelDiagr
+
+for(month in my.months){
+  #month=1 # for the debug
+  startdate.name.map<-my.month[month] # e.g.: "January"
+
+  for(my.score.name.map in my.score.name){
+   #my.score.name.map='EnsCorr' # for the debug
+
+   if(my.score.name.map=="EnsCorr")   { load(file=paste0(workdir,'/Data_',var.name,'/EnsCorr_',startdate.name.map,'.RData')); my.score<-my.EnsCorr }
+   if(my.score.name.map=='FairRpss')  { load(file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name.map,'.RData')); my.score<-my.FairRpss }
+   if(my.score.name.map=='FairCrpss') { load(file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name.map,'.RData')); my.score<-my.FairCrpss }
+   if(my.score.name.map=="RelDiagr")  { load(file=paste0(workdir,'/Data_',var.name,'/RelDiagr_',startdate.name.map,'.RData')); my.score<-my.RelDiagr }
+
+   # old green-red palette:
+   # brk.rpss<-c(-1,seq(0,1,by=0.05))
+   # col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2))
+   # brk.rps<-c(seq(0,1,by=0.1),10)
+   # col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred")
+   # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss}
+   # if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss}
+
+   col <- c("blue","white","red") # base palette (an assumption: 'col' was left undefined in the original script)
+   brk.rpss<-c(seq(-1,1,by=0.1))
+   col.rpss<-colorRampPalette(col)(length(brk.rpss)-1)
+
+   my.brk<-brk.rpss # at present all breaks and colors are the same, so there is no need to differentiate between scores
+   my.col<-col.rpss
+
+   for(lead in 1:n.leadtimes){
+
+    #my.title<-paste0(my.score.name.map,' of ',cfs.name,'
+    #10m Wind Speed for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+    if(my.score.name.map!="RelDiagr"){
+
+     #postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'),
+     #           paper="special",width=12,height=7,horizontal=F)
+
+     jpeg(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=1000,height=600)
+
+     layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.2))
+     par(oma=c(1,1,4,1))
+
+     # lat values must be in decreasing order from +90 to -90 degrees and lon from 0 to 360 degrees:
+     PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons, lats, sizetit=0.8, brks=my.brk, cols=my.col, axelab=F, filled.continents=FALSE, drawleg=F)
+     my.title<-paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+     title(my.title,line=0.5,outer=T)
+
+     ColorBar(my.brk, cols=my.col, vert=T, cex=1.4)
+
+     if(my.score.name.map=="EnsCorr"){ # add a small grey point if the correlation is significant:
+
+      # arrange lat/lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. (lat) and from 0 degr. (lon):
+      my.PValue.rev <- my.PValue
+      my.PValue.rev[lead,,] <- my.PValue[lead,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] # note: the original indexed with 'i', which is undefined here; 'lead' is meant
+      for (x in 1:n.lon) {
+        for (y in 1:n.lat) {
+          if (my.PValue.rev[lead,y,x] < 0.05) { # assumed 5% significance level; the original compared the p-value to TRUE
+            text(x = lo[x], y = la[y], ".", cex = .2)
+          }
+        }
+      }
+     }
+     dev.off()
+
+    } # close if on !RelDiagr
+
+    if(my.score.name.map=="RelDiagr") {
+
+     jpeg(file=paste0(workdir,'/Maps_',var.name,'/RelDiagr_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'),width=600,height=600)
+
+     my.title<-paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',startdate.name.map,'. Forecast time ',leadtime.week[lead],'.')
+
+     plot(c(0,1),c(0,1),type="l",xlab="Forecast Frequency",ylab="Observed Frequency", col='gray30', main=my.title)
+     lines(my.score[[lead]]$p.avgs[!is.na(my.score[[lead]]$p.avgs)],my.score[[lead]]$cond.prob[!is.na(my.score[[lead]]$cond.prob)],type="o", pch=16, col="blue")
+     #lines(my.score[[n.leadtimes+lead]]$p.avgs[!is.na(my.score[[n.leadtimes+lead]]$p.avgs)],my.score[[n.leadtimes+lead]]$cond.prob[!is.na(my.score[[n.leadtimes+lead]]$cond.prob)],type="o",pch=16,col="darkgreen")
+     lines(my.score[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.score[[2*n.leadtimes+lead]]$p.avgs)], my.score[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.score[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col="red")
+     legend("bottomright", legend=c('Lower Tercile','Upper Tercile'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red'))
+
+     dev.off()
+    }
+
+   } # next lead
+
+  } # next score
+
+} # next month
+
+
+###############################################################################################
+#                        Composite map of the four skill scores                               #
+###############################################################################################
+
+# run it only when you have all the 4 skill scores for each month:
+
+library(jpeg) # for readJPEG() below
+
+my.months=9:12 # choose the month(s) to plot and save
+
+for(month in my.months){
+
+  startdate.name.map<-my.month[month] # e.g.: "January"
+
+  #postscript(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.ps'), paper="special",width=12,height=7,horizontal=F)
+
+  jpeg(file=paste0(workdir,'/Maps_',var.name,'/SkillScores_',startdate.name.map,'_',var.name,'.jpg'), width=4000, height=2400, quality=100)
+
+  layout(matrix(1:16, 4, 4, byrow=FALSE), widths=c(1.8,1.8,1.8,1.2), heights=c(1,1,1,1))
+  #layout.show(16)
+
+  for(score in 1:4){
+    for(lead in 1:n.leadtimes){
+
+      img<-readJPEG(paste0(workdir,'/Maps_',var.name,'/',my.score.name[score],'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.jpg'))
+      par(mar = rep(0, 4), xaxs='i', yaxs='i' )
+      plot.new()
+      rasterImage(img, 0, 0, 1, 1, interpolate=FALSE)
+
+    } # next lead
+
+  } # next score
+
+  dev.off()
+
+} # next month
+
+
+# Dani's map script for the small figure:
+#/home/Earth/vtorralb/R/scripts/Veronica_2014/TimeSeries/PlotAno_mod_obs.R
+
+
+###############################################################################################
+#  Preformatting: calculate weekly climatologies and anomalies for each forecast startdate    #
+###############################################################################################
+#
+# TODO: finish this section!
+#
+# the only difference is that anomalies are computed using climatologies from the hindcast data,
+# and not from the forecast data.
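+# In essence (a minimal sketch with a toy array, not the script's real dimensions):
+# the weekly climatology is the mean over members and years, and the anomalies are
+# the departures from it:
+# toy <- array(rnorm(2*3*4), c(2,3,4))                      # [member, year, leadtime]
+# toy.clim <- apply(toy, 3, mean)                           # one climatology per leadtime
+# toy.anom <- toy - InsertDim(InsertDim(toy.clim,1,3),1,2)  # replicate clim over years and members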
+ +# Run it only once at the beginning: + +file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path +#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf' + +my.startdates=1:52 # choose a sequence of forecast startdates to save their anomalies + +for(startdate in my.startdates){ # the startdate week to load inside the sdates weekly sequence: + print(paste0('Computing anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates))) + + data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + + # dim(data.hindcast$mod) # [1,4,20,4,320,640] + + # Mean of the 4 members of the Hindcasts and of the 20 years (for each point and leadtime); + clim.hindcast<-apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average for each leadtime and pixel + clim.hindcast<-InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values and times to calc. anomalies + + anom.hindcast <- data.hindcast$mod - clim.hindcast + + rm(data.hindcast,clim.hindcast) + gc() + + # average the anomalies over the 4 hindcast members [1,20,4,320,640] + anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean) + + my.grid<-paste0('r',n.lon,'x',n.lat) + data.rean <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + # dim(data.rean$mod) # [1,1,20,4,320,640] + + # Mean of the 20 years (for each point and leadtime) + clim.rean<-apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + clim.rean<-InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values times to be able to calculate the anomalies #[1,1,20,4,320,640] + + anom.rean <- data.rean$mod - clim.rean + + anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640] + + #save(anom.hindcast.mean,anom.rean,file=paste0(workdir,'/Data/anom_for_ACC_startdate',startdate,'.RData')) + + save(anom.hindcast,file=paste0(workdir,'/Data/anom_hindcast_startdate',startdate,'.RData')) + save(anom.hindcast.mean,file=paste0(workdir,'/Data/anom_hindcast_mean_startdate',startdate,'.RData')) + save(anom.rean,file=paste0(workdir,'/Data/anom_rean_startdate',startdate,'.RData')) + save(clim.rean,file=paste0(workdir,'/Data/clim_rean_startdate',startdate,'.RData')) + + rm(data.rean,anom.hindcast,anom.hindcast.mean,clim.rean,anom.rean) + gc() + +} + + + + + + + +################################################################################## +# Toy Model # +################################################################################## + +library(s2dverification) +library(SpecsVerification) +library(easyVerification) + +my.prob=c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles) + +N=80 # number of years +M=14 # number of members + +for(M in c(2:10,20,51,100,200)){ + +for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){ + +anom.rean<-rnorm(N) +anom.hind<-array(rnorm(N*M),c(N,M)) +#quantile(anom.hind,prob=my.prob,type=8) + 
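+# What convert2prob does below (sketch, not part of the original script): it bins
+# each value into one of K=3 categories delimited by the sample terciles and
+# returns per-row counts; for a vector of single observations the rows are one-hot:
+# convert2prob(c(-1.2, 0.1, 1.3), prob=c(1/3,2/3)) # a 3 x 3 matrix of 0/1 category indicators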
+obs<-convert2prob(anom.rean,prob<-my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob<-my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement from the hindcast
+ens.clim<-convert2prob(anom.hind.clim,prob<-my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on the observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one column more
+ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
+
+p<-count2prob(ens) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the sum for each year is not 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85), plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using its own climatological forecast
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from its N -> +oo limit (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # veriApply builds its own climatological reference
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
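+# Worked RPS example (a sketch, not part of the original script), using the usual
+# unnormalised definition RPS = sum over categories of (cum. fcst prob - cum. obs)^2:
+# a 4-member forecast with tercile counts (1,2,1) and an observation in the middle
+# tercile gives
+# p.cum <- cumsum(c(1,2,1)/4) # 0.25 0.75 1.00
+# o.cum <- cumsum(c(0,1,0))   # 0.00 1.00 1.00
+# sum((p.cum-o.cum)^2)        # 0.0625 + 0.0625 + 0 = 0.125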
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using its own climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from its N -> +oo limit (0.444)
+
+cat(paste0('n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' Rps: ',Rps,' Rps.clim: ',Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n'))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
+
+
+##################################################################################
+#                                Other analysis                                  #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm: not only to swap dimensions, but also to merge two dimensions into one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) +cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 startdates that must be computed individually because they are not a complete set of 4 + +for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years + data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + + # the intermediate startdate between startdate week i and startdate week i+4 are repeated: + if(startdate <= 49){ + clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week) + clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4 + clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdate 1 and 5 are necessary to calculate all startdates between 1 and 5 + clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e + clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f + clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g + } # 5 e f g h + + if(startdate > 1 && startdate <=49){ # fill the previous startdates: + 
clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,] + clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,] + clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,] + clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,] + } + +} + + + + + # You can make cor much faster by stripping away all the error checking code and calling the internal c function directly: + # r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })}) + # r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })}) + C_cor<-get("C_cor", asNamespace("stats")) + test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE) + my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=nom.rean.chunk[,i,j,k], na.method=2, FALSE) + + + +####################################################################################### +# Raster Animations # +####################################################################################### + +library(png) +library(animation) + + +clustPNG <- function(pngobj, nclust = 3){ + + j <- pngobj + # If there is an alpha component remove it... + # might be better to just adjust the mat matrix + if(dim(j)[3] > 3){ + j <- j[,,1:3] + } + k <- apply(j, c(1,2), c) + mat <- matrix(unlist(k), ncol = 3, byrow = TRUE) + grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2]) + clust <- kmeans(mat, nclust, iter.max = 20) + new <- clust$centers[clust$cluster,] + + for(i in 1:3){ + tmp <- matrix(new[,i], nrow = dim(j)[1]) + j[,,i] <- tmp + } + + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) +} + +makeAni <- function(file, n = 10){ + j <- readPNG(file) + for(i in 1:n){ + clustPNG(j, i) + mtext(paste("Number of clusters:", i)) + } + + j <- readPNG(file) + plot.new() + rasterImage(j, 0, 0, 1, 1, interpolate = FALSE) + mtext("True Picture") +} + +file <- "~/Dropbox/Photos/ClassyDogCropPng.png" +n <- 20 +saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5)) + + + + +####################################################################################### +# ProgressBar # +####################################################################################### + +for(i in 1:10) { + Sys.sleep(0.2) + # Dirk says using cat() like this is naughty ;-) + #cat(i,"\r") + # So you can use message() like this, thanks to Sharpie's + # comment to use appendLF=FALSE. 
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
+
+
+#######################################################################################
+#                           Load large datasets with ff                               #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind") # note: ffload() expects a character vector in 'list'
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3)) # terciles (the original had c(1/3,1/3))
+close(anom.hind); rm(anom.hind)
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stand-in for an array that doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# sketch of a wrapper (the signature of the original was garbled; the name and arguments here are assumptions):
+skill.score.big <- function(score.name, my.scratch, obs){
+  ffload(file=my.scratch) # restores the ff object 'anom.hind' from disk
+  open(anom.hind)
+  out <- veriApplyBig(score.name, fcst=anom.hind, obs=obs, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  out
+}
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # stand-in for an array that doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=fileoutput_cross)
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+ #month=1 # for the debug (leave commented, otherwise it overrides the loop variable)
+ my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+ startdate.name=my.month[month] # name given to the period of the selected startdates, e.g.: "January"
+ n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+ print(paste0("Computing Skill Scores for ",startdate.name))
+
+ if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # reanalysis data is not converted to ff because it is a small array
+
+ if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+ if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+ if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+ for(startdate in my.startdates){
+
+  pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+  my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+  load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  if(!boot) {
+    anom.rean<-drop(anom.rean)
+    anom.rean.big[my.time.interv,,,] <- anom.rean
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+    anom.hindcast<-drop(anom.hindcast)
+
+    if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+    if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+    rm(anom.hindcast, anom.rean)
+  }
+  gc()
+ }
+
+ if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){ # note: the original put '||' inside any(), which does not test each score name separately
+  if(!boot) {
+   if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                  prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+   if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+  } else {
+   # bootstrapping:
+   my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+   if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+   for(b in 1:n.boot){
+    print(paste0("resampling n. 
",b,"/",n.boot)) + yrs.sampled <- sample(1:n.yrs, replace=TRUE) + + if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled")) + if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon)) + + if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to load the hindcasts if b>1 + + for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m) + + if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands + + # faster way that will be able to replace the above one when ffapply output parameters wll be improved: + #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes 3m only + #aa<-abind(a[1:10], along=4) # it doesn't fit into memory! + + if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss + + if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,], + obs=anom.hind.big.sampled[n.members+1,,,,], + tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss + + if(bigdata) delete(anom.hind.big.sampled) + rm(anom.hind.big.sampled) # delete the sampled hindcast + + } + + # calculate 5 and 95 percentiles of skill scores: + if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8)) + if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8) + + } # close if on boot + + } + + if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts + rm(anom.hind.big) + rm(anom.rean.big) + gc() + + if(!boot){ + if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData')) + } else { + if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData')) + if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData')) + } + + + if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss) + if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot) + if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf) + +} # next m (month) + + +######################################################################################### +# Calculate and save the FairRpss for a chosen period using ff and Load() # +######################################################################################### + + anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig") + + for(startdate in 
my.startdates){
+   pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded +1
+   my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+   #anom.rean<-drop(anom.rean)
+   #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+   my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+   data.hindcast <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+          obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+          output = 'lonlat', configfile = file_path, method='distance-weighted' )
+   anom.hindcast <- drop(data.hindcast$mod) # NB: raw data, not anomalies yet; the original assigned this Load() to 'anom.rean' and left 'anom.hindcast' undefined
+
+   #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+   #anom.hindcast<-drop(anom.hindcast)
+
+   anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+   rm(anom.hindcast)
+  }
+
+  # NB: anom.rean.big is allocated above but never filled here; the commented load() lines are the missing step
+  my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+  save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                       Alternative way to speed up Load()                              #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+ var.name='sfcWind' #'tas' #'prlr' # any daily variable we want to test for being WT-driven
+ var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+ # Forecast system list:
+ S4 <- list(path = ...)
+ CFSv2 <- list(path = ...)
+
+ # Reanalysis list:
+ ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ JRA55 <- list(path = ...)
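+ # Hypothetical helper (not part of the original script): expand a Load()-style
+ # path template, assuming $STORE_FREQ$, $VAR_NAME$, $YEAR$ and $MONTH$ are the
+ # only placeholder tokens:
+ expand.path <- function(template, store.freq, var.name, year, month){
+   out <- gsub("\\$STORE_FREQ\\$", store.freq, template)
+   out <- gsub("\\$VAR_NAME\\$", var.name, out)
+   out <- gsub("\\$YEAR\\$", as.character(year), out)
+   gsub("\\$MONTH\\$", sprintf("%02d", month), out)
+ }
+ #expand.path(ERAint$path, 'daily', 'sfcWind', 1990, 1) # .../sfcWind_f6h/sfcWind_199001.nc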
+ + + + + # Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation): + exp.source <- NULL + obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + + # Select one variable and its frequency: + var.name <- 'sfcWind' + store.freq <- 'daily' + + # Extract only the directory path of the forecast and reanalysis: + obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path) + obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2) + obs.source4 <- strsplit(obs.source3,'/') + obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/") + + obs.source6 <- list.files(path = obs.source5) + + system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',STORE_FREQ,'_mean/',VAR_NAME,'_f6h')) + system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc") + + y <- 1990 + var <- Load(var = VAR_NAME, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat') + + +} # close if on mare + + diff --git a/old/WT_drivers_v1.R b/old/WT_drivers_v1.R new file mode 100644 index 0000000000000000000000000000000000000000..9eedccf2cd2a3345fa6a8baaeda8c9b17732b0dd --- /dev/null +++ b/old/WT_drivers_v1.R @@ -0,0 +1,455 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +library(s2dverification) # for the function Load() +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path for resampled MSLP + +workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir="/scratch/Earth/ncortesi/RESILIENCE/WT_maps" # output dir with seasonal and yearly maps + +psl.rean='ERAintDailyHighRes' # daily reanalysis dataset used for MSLP data +var.name='sfcWind' # any daily reanalysis variable we want to find if it is driven by WTs +var.rean='ERAint' # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset + +year.start=1985 #1985 # starting year of the MSLP daily data (from the 1st of january) +year.end=2014 #2014 # ending year of the MSLP daily data (up to the 31 of December) + +########################################################################################## + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +year.tot <- year.end - year.start + 1 + +periods=c(1:17) # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values +n.periods <- length(periods) + +# Load just 1 day of var data to detect the number of latitude and longitude points; +var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var.lat <- round(var$lat,3) +var.lon <- round(var$lon,3) 
+var.n.lat <- length(var.lat) # number of latitude values of var
+var.n.lon <- length(var.lon)
+var.lon.pos <- ifelse(min(var.lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not.
+
+# Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications;
+# we must exclude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points:
+MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+psl.n.lat <- length(MSLP$lat) # number of latitude values of MSLP
+psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP
+psl.lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not.
+
+psl.n.lat.unused.poles <- 20 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the MSLP reanalysis)
+#psl.n.lat.unused.equat <- 20 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S per side, so it depends on the spat. res. of the reanalysis)
+#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note that the eventual point at lat=0 is always included
+#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note that the eventual point at lat=0 is always excluded
+#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north
+#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1)
+
+# final latitude values used as central points:
+#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)]
+psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points
+psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points
+
+psl.n.lat.used <- length(psl.lat.used)
+psl.n.lon.used <- length(psl.lon.used)
+psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used
+
+if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative longitudes of MSLP to the [0, +360] range
+if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the positive longitudes of MSLP > 180 to the [-180, +180] range
+
+
+# exclude var points > +~80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points:
+var.n.lat.unused.poles <- 20 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the var reanalysis)
+var.n.lat.unused.equat <- 20 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S per side, so it depends on the spat.res.
of the reanalysis) + +var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note than the eventual point at lat=0 is always included +var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note than the eventual point at lat=0 is always excluded +var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north +var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1) +var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south) + +#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points + +var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)] +var.lon.used <- var.lon # longitude values used as central points +var.n.lat.used <- length(var.lat.used) +var.n.lon.used <- length(var.lon.used) +var.n.grid.points <- var.n.lat.used * var.n.lon.used + +# for each var grid point used, find the lat/lon position of the nearby point center of the WT classification: +closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point + +i<-0;vlat.pos <- 0 +for (vlat in var.lat.used) { + vlat.pos<-vlat.pos+1 + + vlon.pos<-0 + for(vlon in var.lon.used) { + i<-i+1 + cat(paste0("Point #",i,"/", var.n.grid.points), '\r') + vlon.pos<-vlon.pos+1 + + closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used) + closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]] + closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]] + + #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug + + } +} + +#save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl.RData")) # save it if it is the first time +load(file=paste0(workdir,"/closer_psl.RData")) # load it if already saved + +#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug +#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug + +# for each var grid point used, load 1 year of daily var and wt data at time, and assign it to the closer mslp central point: +# (you can't open all data at once because it weights too much, but you can open 1 year at time) + +#n.days.tot <- n.days.in.a.yearly.period(year.start,year.end) +n.days.tot <- 365 # for the debug + +sub <- split.array.big(dimensions=c(n.days.in.a.yearly.period(year.start,year.end),var.n.lat.used, var.n.lon.used), along=3) +#save(sub, file=paste0(workdir,"/sub.RData")) # save sub to retrieve it later of you run the next loop only for 1 year only (because 'year.end' changes, so 'sub' changes too) +load(paste0(workdir,"/sub.RData")) + +i=0 +for(y in year.start:year.end){ + #y<-1981 # for the debug + i<-i+1 + print(paste0("Year #",i,"/", year.tot)) + + n.days <- n.days.in.a.year(y) + var <- Load(var.name, NULL, var.rean, paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only + #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE) + + 
var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points + + j<-0 + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug + j<-j+1 + vlat.pos <- vlat.pos+1 + cat('Latitude ',j,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 + for(vlon in var.lon.used) { + vlon.pos <- vlon.pos+1 + + var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)] + + #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL) + + # load WTs classifications (variable WTs) for that year created with WT_vX.R: + load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData")) + + # interval of days belonging to the year y, but starting to count from the year year.begin: + #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 1:n.days.in.a.year(y) + + wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of the year y + + } + } + + #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug + + #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE) + #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE) + + # instead of saving only 1 file for all the spatial domain, save 1 file for each subarray s: (to be able to load them faster in the following step) + for(s in 1:sub$n.sub){ + var.serie.year.sub<-var.serie[,sub$int[[s]],] # format: [lat, lon, day] + save(var.serie.year.sub, file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_",psl.rean,"_year_",y,"_sub_",s,".RData"), compress=FALSE) + + wt.serie.year.sub <- wt.serie[,sub$int[[s]],] + save(wt.serie.year.sub, file=paste0(workdir,"/wt_sub/local_","10WTs_",psl.rean,"_year_",y,"_sub_",s,".RData"), compress=FALSE) + # n.wt10<-apply(wt.serie.year.sub,c(1,2),function(x)length(which(x==18))) # for the debug + # edit(n.wt10) # for the debug + } + +} # close for on y + +#rm(closer.psl.lat, closer.psl.lon, var) + + +# compute the climatology of var and its daily anomalies for each subarray: +var.clim <- array(NA, c(var.n.lat.used, var.n.lon.used)) + +for(s in 1:sub$n.sub){ + #s=1;y=year.start # for the debug + cat(paste0("Computing subarray n. 
", s,"/", sub$n.sub),'\r') + + if(s == sub$n.sub) { + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size.last, n.days.tot)) + } else { + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size, n.days.tot)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_",psl.rean,"_year_",y,"_sub_",s,".RData")) # load var.serie.year.sub + int.days.year <- seq.days.in.a.future.year(year.start,y) + + var.serie.sub[,,int.days.year] <- var.serie.year.sub + + #int.days.period <- pos.period(y, p) + #int.days.future.period <- n.days.in.a.future.year(year.start, y) + int.days.period + #int.days.na <- which(1:n.days.in.a.year(y)==int.days.future.period) + #var.serie.sub[,,int.days.future.period] <- + + rm(var.serie.year.sub) + } + + var.clim.sub <- apply(var.serie.sub,c(1,2),mean, na.rm=T) + + var.anom.sub <- var.serie.sub - InsertDim(var.clim.sub,3,n.days.tot) + var.clim[,sub$int[[s]]] <- var.clim.sub + + # save the var anomaly, one file for each subarray: + save(var.anom.sub, file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_anomaly_",psl.rean,"_",year.start,"-",year.end,"_sub_",s,".RData"), compress=FALSE) + + rm(var.serie.sub, var.anom.sub) +} + +save(var.clim, file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_climatology_",psl.rean,"_",year.start,"-",year.end,".RData"), compress=FALSE) +load(paste0(workdir,"/",var.name,"_sub/local_",var.name,"_climatology_",psl.rean,"_",year.start,"-",year.end,".RData")) + +# do the same as above, but for the wt: + +wts <- c(1:10) # weather type numbers inside wt.serie +n.wts<-length(wts) +n.wt <- var.wt.sum <- var.wt.mean.anom <- var.wt.sd.anom <- array(NA, c(n.wts, var.n.lat.used, var.n.lon.used)) +#vlat.mat <- vlon.mat <- array(NA, c(var.n.lat.used, var.n.lon.used)) # for the debug + +for(s in 1:sub$n.sub){ + #s=1;y=year.start # for the debug + cat(paste0("Computing subarray n. 
", s,"/", sub$n.sub),'\r') + + if(s == sub$n.sub) { + wt.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size.last, n.days.tot)) + n.wt.sub <- var.wt.sum.sub <- var.wt.mean.anom.sub <- var.wt.sd.anom.sub <- array(NA,c(n.wts, var.n.lat.used, sub$sub.size.last)) + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size.last, n.days.tot)) + } else { + wt.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size, n.days.tot)) + n.wt.sub <- var.wt.sum.sub <- var.wt.mean.anom.sub <- var.wt.sd.anom.sub <- array(NA,c(n.wts, var.n.lat.used, sub$sub.size)) + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size, n.days.tot)) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_",psl.rean,"_year_",y,"_sub_",s,".RData")) # load var.serie.year.sub + int.days.year <- seq.days.in.a.future.year(year.start,y) + + var.serie.sub[,,int.days.year] <- var.serie.year.sub + rm(var.serie.year.sub) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/wt_sub/local_","10WTs_",psl.rean,"_year_",y,"_sub_",s,".RData")) # load wt.serie.year.sub + + wt.serie.year.sub[which(wt.serie.year.sub==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days <- seq.days.in.a.future.year(year.start,y) + + wt.serie.sub[,,int.days] <- wt.serie.year.sub #[,sub$int[[s]],] + rm(wt.serie.year.sub) + } + + load(file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_anomaly_",psl.rean,"_",year.start,"-",year.end,"_sub_",s,".RData")) # load var.anom.sub + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #sub$int[[s]]-1 + for(vlon in var.lon.used[sub$int[[s]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.sub[vlat.pos,vlon.pos,]==wt) + + n.wt.sub[wt,vlat.pos,vlon.pos] <- length(pos.wt) + var.wt.sum.sub[wt,vlat.pos,vlon.pos] <- sum(var.serie.sub[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.sub[wt,vlat.pos,vlon.pos] <- mean(var.anom.sub[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.sub[wt,vlat.pos,vlon.pos] <- sd(var.anom.sub[vlat.pos,vlon.pos,][pos.wt]) + + } + } + } + + n.wt[,,sub$int[[s]]] <- n.wt.sub + var.wt.sum[,,sub$int[[s]]] <- var.wt.sum.sub + var.wt.mean.anom[,,sub$int[[s]]] <- var.wt.mean.anom.sub + var.wt.sd.anom[,,sub$int[[s]]] <- var.wt.sd.anom.sub +} + +save(n.wt, var.wt.sum, var.wt.mean.anom, file=paste0(workdir,"/",var.name,"_sub/output_",psl.rean,"_",year.start,"-",year.end,".RData"), compress=FALSE) +load(file=paste0(workdir,"/",var.name,"_sub/output_",psl.rean,"_",year.start,"-",year.end,".RData")) + +#s <- which(is.na(var.wt.mean)) +#ar.wt.mean[ss] <- -999 +var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map + +#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used) +pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used) +#var.lat[pos.var.lat.unused] + +var.wt.mean <- var.wt.sum/n.wt # average value of var associated to a WT +n.wt <- 100*n.wt/n.days.tot # convert to yearly frequency in % +var.wt.mean.contrib <- 100*var.wt.sum/(InsertDim(var.clim,1,n.wts)*n.days.tot) # convert the sum to a % contribution + 
+var.clim.NA <- array(NA, c(var.n.lat, var.n.lon.used)) +n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.wts, var.n.lat, var.n.lon.used)) + +var.clim.NA[pos.var.lat.used,] <- var.clim +n.wt.NA[,pos.var.lat.used,] <- n.wt +var.wt.mean.NA[,pos.var.lat.used,] <- var.wt.mean +var.wt.sum.NA[,pos.var.lat.used,] <- var.wt.sum +var.wt.mean.anom.NA[,pos.var.lat.used,] <- var.wt.mean.anom +var.wt.mean.contrib.NA[,pos.var.lat.used,] <- var.wt.mean.contrib +var.wt.sd.anom.NA[,pos.var.lat.used,] <- var.wt.sd.anom.NA + +# remove the equatorial area from visualization: +n.wt.NA[,var.pos.lat.unused.eq,] <- var.wt.mean.NA[,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,var.pos.lat.unused.eq,] <- NA +var.wt.sd.anom.NA[,var.pos.lat.unused.eq,] <- NA + +# Map intervals and colors: +my.brks <- list() + +#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology +my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology +#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT +my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT +my.brks[[3]] <- c(seq(0,30,0.1),100) # Frequency associated to a WT +my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var +my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT +my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies pf a WT + +my.cols <- list() +#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1) +#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-green-white-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- rev(colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1)) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors 
+ + +# Create and save maps: + +index=1 + +png(filename=paste0(mapdir,"/10-m_Wind_Speed_Climatology.png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#layout.show(2) +#PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +# change longitude in format -180,180 to displey europe in the middle of the map: + +var.clim.NA.bis <- var.clim.NA[,c(257:512,1:256)] +PlotEquiMap(var.clim.NA.bis, var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + +dev.off() + + +index=2 +wt=6 + +png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", WTs.type10[wt],".png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +#PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +var.wt.mean.NA.bis <- var.wt.mean.NA[,,c(257:512,1:256)] +PlotEquiMap(var.wt.mean.NA.bis[wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +#PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + +dev.off() + + +index=3 +wt=10 + +png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#n.wt.NA[wt,50:55,490:512] +#ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) +#n.wt10<-apply(wt.serie.,c(1,2),function(x)length(which(x==18))) # for the debug +#edit(n.wt.NA[10,,]) # for the debug +#layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +n.wt.NA.bis <- n.wt.NA[,,c(257:512,1:256)] +PlotEquiMap(n.wt.NA.bis[wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) + +dev.off() + +index=4 +wt=6 # for the debug + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,c(257:512,1:256)] + PlotEquiMap(var.wt.mean.contrib.NA.bis[wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], + drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt] , sizetit=1) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + +index=5 +wt=6 # for the debug +slide=TRUE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise + +if(!slide) {n.box <- 
10; col.cex=1.5; my.title <- WTs.type10.name[wt]} else {n.box <- 5; col.cex=3} + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/Mean_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,c(257:512,1:256)] + PlotEquiMap(var.wt.mean.anom.NA.bis[wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], + drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T) + + dev.off() +} + +# Poster version over Europe: +index=5 +for(wt in 1:n.wts){ # wt==9 + png(filename=paste0(mapdir,"/Mean_European_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,5),2), 6, 1, byrow = TRUE)) #, widths=c(1,2,3,4,5,6,7,8,9)) + #layout(matrix(c(1,1,1,2), 4, 1, byrow = TRUE), widths=c(11,1.2)) + par(oma=c(0,0,2,0))#, mar = rep(5, 4)) + var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,c(257:512,1:256)] + PlotEquiMap2(var.wt.mean.anom.NA.bis[wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray", square=T) #, contours=var.wt.mean.anom.NA.bis[wt,25:90,220:310]) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, cex=3) + title(WTs.type10.name[wt],cex.main=3.5,outer=T) + + dev.off() +} + + +#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") diff --git a/old/WT_drivers_v2.R b/old/WT_drivers_v2.R new file mode 100644 index 0000000000000000000000000000000000000000..441393ad68bc77dff91f02a4b42c7de969fda77e --- /dev/null +++ b/old/WT_drivers_v2.R @@ -0,0 +1,556 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +library(s2dverification) # for the function Load() + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path for resampled MSLP + +workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir="/scratch/Earth/ncortesi/RESILIENCE/WT_maps" # output dir with seasonal and yearly maps + +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +psl.rean='ERAint' #'ERAintDailyHighRes' # choose a daily reanalysis dataset for MSLP data +var.rean='ERAint' # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset +var.name='prlr' #'tas' #'sfcWind' # any daily variable we want to find if it is WTs-driven +var.name.file='Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's filename + +year.start=1985 # starting year of the MSLP daily data (from the 1st of january) +year.end=2011 #2014 # ending year of the MSLP daily data (up to the 31 of December) + +partial.end=FALSE # put TRUE 
if the last year ('year.end') has not all the yearly data but stop before December the 31th; in this case, must also specify the variable below +n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE) I.e: data for 2015 doesn't have December, so it has 365-31=334 days + +########################################################################################## + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +year.tot <- year.end - year.start + 1 + +periods=c(1:17) # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values +n.periods <- length(periods) + +# Load just 1 day of var data to detect the number of latitude and longitude points; +#var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +var.lat <- round(var$lat,3) +var.lon <- round(var$lon,3) +var.n.lat <- length(var.lat) # number of latitude values of var +var.n.lon <- length(var.lon) +var.lon.pos <- ifelse(min(var.lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +# Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications +# we must exlude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +#MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) +MSLP <- Load(var = 'psl', exp = NULL, obs = list(psl.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +psl.n.lat <- length(MSLP$lat) # number of latitude values or MSLP +psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP +psl.lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +psl.n.lat.unused.poles <- 20 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the MSLP reanalysis) +#psl.n.lat.unused.equat <- 20 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. 
of the reanalysis) +#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note than the eventual point at lat=0 is always included +#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note than the eventual point at lat=0 is always excluded +#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north +#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1) + +# final latitude values used as central points: +#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)] +psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points +psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points + +psl.n.lat.used <- length(psl.lat.used) +psl.n.lon.used <- length(psl.lon.used) +psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used + +if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative long of MSLP to the [0, +360] range +if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the positive long of MSLP > 180 to the [-180, +180] range + +# exlude var points > +~80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +var.n.lat.unused.poles <- 20 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the var reanalysis) +var.n.lat.unused.equat <- 20 # numb. of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. 
of the reanalysis) + +var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note than the eventual point at lat=0 is always included +var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note than the eventual point at lat=0 is always excluded +var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north +var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1) +var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south) + +#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points + +var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)] +var.lon.used <- var.lon # longitude values used as central points +var.n.lat.used <- length(var.lat.used) +var.n.lon.used <- length(var.lon.used) +var.n.grid.points <- var.n.lat.used * var.n.lon.used +#var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map +var.lon.used.bis <- c(var.lon.used[c(ceiling(var.n.lon.used/2):var.n.lon.used)]-360,var.lon.used[c(1:(ceiling(var.n.lon.used/2)-1))]) # to put Europe in the middle of the map + +#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used) +pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used) +#var.lat[pos.var.lat.unused] + + +# Map of the central points for Europe: +void <- array(NA,c(var.n.lat.used, var.n.lon.used)) +#void.bis <- void[,c(257:512,1:256)] # when using the var dataset with res 256x512 +void.bis <- void[,c(ceiling(var.n.lon.used/2):var.n.lon.used,1:(ceiling(var.n.lon.used/2)-1))] +#PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#PlotEquiMap(void.bis[1:215,], var.lon.used.bis, var.lat[1:215], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +par(oma=c(1,1,1,1)) +#PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +PlotEquiMap(void.bis[25:90, 220:317], var.lon.used.bis[220:317], var.lat[25:90], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +my.parcelas<-data.frame(name="", lat=rep(var.lat.used,var.n.lon.used), long=rep(var.lon.used.bis, each=var.n.lat.used), pop=0, capital=0, stringsAsFactors=F) +map.cities(my.parcelas, pch=3, cex=.5,col=c("gray40")) # add the 4 points of the 4 parcelas +write.table(cbind(lat=rep(var.lat.used,var.n.lon.used), lon=rep(var.lon.used, each=var.n.lat.used)),file=paste0(workdir,"/list_lat_lon.txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + + +# for each var grid point used, find the lat/lon position of the nearby point center of the WT classification: +closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point + +i<-0;vlat.pos <- 0 +for (vlat in var.lat.used) { + vlat.pos<-vlat.pos+1 + + vlon.pos<-0 + for(vlon in var.lon.used) { + i<-i+1 + cat(paste0("Point #",i,"/", var.n.grid.points), '\r') + 
vlon.pos<-vlon.pos+1 + + closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used) + closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]] + closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]] + + #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug + + } +} + +save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl_",var.name,".RData")) # save it if it is the first time +load(file=paste0(workdir,"/closer_psl_",var.name,".RData")) # load it if already saved + +#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug +#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug + +# for each var grid point used, load 1 year of daily var and wt data at time, and assign it to the closer mslp central point: +# (you can't open all data at once because it weights too much, but you can open 1 year at time) + +sub <- split.array(dimensions=c(n.days.in.a.yearly.period(year.start,year.end),var.n.lat.used, var.n.lon.used), along=3) +save(sub, file=paste0(workdir,"/sub_",var.name,".RData")) # save sub to retrieve it later or you run the next loop for 1 year only ('year.end' changes, so 'sub' changes too) +load(paste0(workdir,"/sub_",var.name,".RData")) + +i=0 +for(y in year.start:year.end){ + #y<-1981 # for the debug + i<-i+1 + print(paste0("Year #",i,"/", year.tot)) + + n.days <- n.days.in.a.year(y) + if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days + + var <- Load(var.name, NULL, var.rean, paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only + #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE) + + ffload(file=paste0(workdir,"/WTs_",y)) # open the ff binary file with all the WT classification for that year + + var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points + + j<-0 + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug + j<-j+1 + vlat.pos <- vlat.pos+1 + cat('Latitude ',j,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 + for(vlon in var.lon.used) { + vlon.pos <- vlon.pos+1 + + var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)] + + #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL) + + # load WTs classifications (variable WTs) for that year created with WT_vX.R: + #load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData")) + open(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + + # interval of days belonging to the year y, but starting to count from the year year.begin: + #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 
1:n.days.in.a.year(y) + wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of the year y + + } + } + + #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug + + #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE) + #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE) + + #year=4000 + #ffload(file=paste0(workdir,"/WTs_",year)) + #latc <- 70 + #lonc <- 0 + #open(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must + #close(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must use eval(parse(text=...))) + + + # instead of saving only 1 file for all the spatial domain, save 1 file for each subarray s: (to be able to load them faster in the following step) + for(s in 1:sub$n.sub){ + var.serie.year.sub<-var.serie[,sub$int[[s]],] # format: [lat, lon, day] + save(var.serie.year.sub, file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_",psl.rean,"_year_",y,"_sub_",s,".RData"), compress=FALSE) + + wt.serie.year.sub <- wt.serie[,sub$int[[s]],] + save(wt.serie.year.sub, file=paste0(workdir,"/",var.name,"_wt_sub/local_","10WTs_",psl.rean,"_year_",y,"_sub_",s,".RData"), compress=FALSE) + # n.wt10<-apply(wt.serie.year.sub,c(1,2),function(x)length(which(x==18))) # for the debug + # edit(n.wt10) # for the debug + } + +} # close for on y + +#rm(closer.psl.lat, closer.psl.lon, var) + + +# compute the climatology of var for each period and its daily anomalies for each subarray and period: +var.clim <- array(NA, c(n.periods, var.n.lat.used, var.n.lon.used)) + +n.days.tot <- n.days.in.a.yearly.period(year.start,year.end) +#n.days.tot <- 365 # for the debug + +days.period <- n.days.period <- list() +for(p in periods){ + days.period[[p]] <- NA + for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + days.period[[p]] <- days.period[[p]][-1] # remove the NA at the begin that was introduced only to be able to execture the above command + n.days.period[[p]] <- length(days.period[[p]]) +} + +for(s in 1:sub$n.sub){ + #s=1 # for the debug + cat(paste0("Computing subarray n. 
", s,"/", sub$n.sub),'\r') + + if(s == sub$n.sub) { + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size.last, n.days.tot)) + } else { + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size, n.days.tot)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_",psl.rean,"_year_",y,"_sub_",s,".RData")) # load var.serie.year.sub + int.days.year <- seq.days.in.a.future.year(year.start,y) + + var.serie.sub[,,int.days.year] <- var.serie.year.sub + rm(var.serie.year.sub) + } + + for(p in periods){ + var.serie.sub.period <- var.serie.sub[,,days.period[[p]]] # select only the days in the chosen period + + var.clim.sub <- apply(var.serie.sub.period,c(1,2),mean, na.rm=T) + + var.anom.sub.period <- var.serie.sub.period - InsertDim(var.clim.sub, 3, n.days.period[[p]]) + var.clim[p,,sub$int[[s]]] <- var.clim.sub + + # save the var anomaly, one file for each subarray and period: + assign(paste0("var.anom.sub.period",p), var.anom.sub.period) + + # save(, file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_anomaly_",psl.rean,"_",year.start,"-",year.end,"_sub_",s,"_period_",p,".RData"), compress=FALSE) + # save not working in this case, use do.call below: + do.call(save, list(paste0("var.anom.sub.period",p), file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_anomaly_",psl.rean,"_",year.start,"-",year.end,"_sub_",s,"_period_",p,".RData"), compress=FALSE)) # you must use this syntax when saving an Rdata with the variable name given by a string!!! + + rm(var.anom.sub.period,var.serie.sub.period) + do.call(rm, list(paste0("var.anom.sub.period",p))) + } + + rm(var.serie.sub) +} + +save(var.clim, file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_climatology_",psl.rean,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save var.clim +load(paste0(workdir,"/",var.name,"_sub/local_",var.name,"_climatology_",psl.rean,"_",year.start,"-",year.end,".RData")) + + +# do the same as above, but for the wt: +wts <- c(1:10) # weather type numbers inside wt.serie +n.wts<-length(wts) +n.wt <- var.wt.sum <- var.wt.mean.anom <- var.wt.sd.anom <- var.wt.mean.contrib <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used)) +#vlat.mat <- vlon.mat <- array(NA, c(var.n.lat.used, var.n.lon.used)) # for the debug + +for(s in 1:sub$n.sub){ + #s=1;y=year.start # for the debug + cat(paste0("Computing subarray n. 
", s,"/", sub$n.sub),'\r') + + if(s == sub$n.sub) { + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size.last, n.days.tot)) + wt.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size.last, n.days.tot)) + n.wt.sub <- var.wt.sum.sub <- var.wt.mean.anom.sub <- var.wt.sd.anom.sub <- array(NA,c(n.periods, n.wts, var.n.lat.used, sub$sub.size.last)) + } else { + var.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size, n.days.tot)) + wt.serie.sub <- array(NA, c(var.n.lat.used, sub$sub.size, n.days.tot)) + n.wt.sub <- var.wt.sum.sub <- var.wt.mean.anom.sub <- var.wt.sd.anom.sub <- array(NA,c(n.periods, n.wts, var.n.lat.used, sub$sub.size)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_",psl.rean,"_year_",y,"_sub_",s,".RData")) # load var.serie.year.sub + int.days.year <- seq.days.in.a.future.year(year.start,y) + var.serie.sub[,,int.days.year] <- var.serie.year.sub + rm(var.serie.year.sub) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_wt_sub/local_","10WTs_",psl.rean,"_year_",y,"_sub_",s,".RData")) # load wt.serie.year.sub + + wt.serie.year.sub[which(wt.serie.year.sub==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days.year <- seq.days.in.a.future.year(year.start,y) + + wt.serie.sub[,,int.days.year] <- wt.serie.year.sub #[,sub$int[[s]],] + rm(wt.serie.year.sub) + } + + for(p in periods){ + + load(file=paste0(workdir,"/",var.name,"_sub/local_",var.name,"_anomaly_",psl.rean,"_",year.start,"-",year.end,"_sub_",s,"_period_",p,".RData"))# load var.anom.sub.period + var.serie.sub.period <- var.serie.sub[,,days.period[[p]]] + wt.serie.sub.period <- wt.serie.sub[,,days.period[[p]]] + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #sub$int[[s]]-1 + for(vlon in var.lon.used[sub$int[[s]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.sub.period[vlat.pos,vlon.pos,]==wt) + var.anom.sub.period <- get(paste0("var.anom.sub.period",p)) + + n.wt.sub[p,wt,vlat.pos,vlon.pos] <- length(pos.wt) + var.wt.sum.sub[p,wt,vlat.pos,vlon.pos] <- sum(var.serie.sub.period[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.sub[p,wt,vlat.pos,vlon.pos] <- mean(var.anom.sub.period[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.sub[p,wt,vlat.pos,vlon.pos] <- sd(var.anom.sub.period[vlat.pos,vlon.pos,][pos.wt]) + + rm(pos.wt, var.anom.sub.period) + } + } + } + + n.wt[p,,,sub$int[[s]]] <- n.wt.sub[p,,,] + var.wt.sum[p,,,sub$int[[s]]] <- var.wt.sum.sub[p,,,] + var.wt.mean.anom[p,,,sub$int[[s]]] <- var.wt.mean.anom.sub[p,,,] + var.wt.sd.anom[p,,,sub$int[[s]]] <- var.wt.sd.anom.sub[p,,,] + } # close for on p +} + +save(n.wt, var.wt.sum, var.wt.mean.anom, var.wt.sd.anom.sub, file=paste0(workdir,"/",var.name,"_sub/output_",psl.rean,"_",year.start,"-",year.end,".RData"), compress=FALSE) + +load(file=paste0(workdir,"/",var.name,"_sub/output_",psl.rean,"_",year.start,"-",year.end,".RData")) + +var.wt.mean <- var.wt.sum/n.wt # average value of var associated to a WT +#n.wt <- 100*n.wt/n.days.tot # convert to yearly frequency in % +for(p in periods) n.wt[p,,,] <- 100*n.wt[p,,,]/n.days.period[[p]] # convert to yearly frequency in % +for(p in periods) var.wt.mean.contrib[p,,,] <- 100*var.wt.sum[p,,,]/(InsertDim(var.clim[p,,],1,n.wts)*n.days.period[[p]]) # convert the sum to a % 
contribution + +var.clim.NA <- array(NA, c(n.periods, var.n.lat, var.n.lon.used)) +n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.periods, n.wts, var.n.lat, var.n.lon.used)) + +var.clim.NA[,pos.var.lat.used,] <- var.clim +n.wt.NA[,,pos.var.lat.used,] <- n.wt +var.wt.mean.NA[,,pos.var.lat.used,] <- var.wt.mean +var.wt.sum.NA[,,pos.var.lat.used,] <- var.wt.sum +var.wt.mean.anom.NA[,,pos.var.lat.used,] <- var.wt.mean.anom +var.wt.mean.contrib.NA[,,pos.var.lat.used,] <- var.wt.mean.contrib +var.wt.sd.anom.NA[,,pos.var.lat.used,] <- var.wt.sd.anom + +# remove the equatorial area from visualization: +n.wt.NA[,,var.pos.lat.unused.eq,]<- var.wt.mean.NA[,,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,,var.pos.lat.unused.eq,] <- NA +var.wt.mean.anom.NA[,,var.pos.lat.unused.eq,] <- var.wt.sd.anom.NA[,,var.pos.lat.unused.eq,] <- NA + +# move Europe to the center of the maps: +p1 <- ceiling(var.n.lon.used/2)-1 +p2 <- ceiling(var.n.lon.used/2) +p3 <- var.n.lon.used + +var.clim.NA.bis <- var.clim.NA[,,c(p2:p3,1:p1)] +n.wt.NA.bis <- n.wt.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.NA.bis <- var.wt.mean.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,,c(p2:p3,1:p1)] +var.wt.sd.anom.NA.bis <- var.wt.sd.anom.NA[,,,c(p2:p3,1:p1)] + +save.image(file=paste0(workdir,"/",var.name,"_R_session_",psl.rean,"_",year.start,"-",year.end,".RData"), compress=FALSE) + +# Map intervals and colors: +my.brks <- list() + +#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology +my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology + +#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT +my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT +my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT +my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var +my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT +my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT + +my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies pf a WT + +my.cols <- list() +#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1) +#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-green-white-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores +my.cols[[1]] <- 
colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + +# Create and save maps: + +index=1 +#p=17 # for the debug + +for(p in periods){ + png(filename=paste0(mapdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude in format -180,180 to displey europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + PlotEquiMap(var.clim.NA.bis[p,,]-273, var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + +index=2 +#p=17 # for the debug +wt=6 + +for(p in periods){ + png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", WTs.type10[wt],period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + PlotEquiMap(var.wt.mean.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +index=3 +p=17 # for the debug +wt=6 + +png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, 
brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#n.wt.NA[wt,50:55,490:512] +#ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) +#n.wt10<-apply(wt.serie.,c(1,2),function(x)length(which(x==18))) # for the debug +#edit(n.wt.NA[10,,]) # for the debug +#layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +PlotEquiMap(n.wt.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) + +dev.off() + + +index=4 +p=17 +wt=6 # for the debug + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(var.wt.mean.contrib.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], + drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt] , sizetit=1) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +index=5 +#p=17 # for the debug +#wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=FALSE # put TRUE to show only a zoom over Europe; false otherwise +map.only=FALSE # put TRUE if you want to print only the image with no legend or title (for posters) + +domain <- ifelse(europe, "European", "World") + +for(p in periods){ + for(wt in 1:n.wts){ + if(!slide) {n.box <- 10; col.cex=1; my.title <- paste(WTs.type10.name[wt],period.name[p])} else {n.box <- 5; col.cex=3; my.title <- ""} + if(map.only) my.title <- "" + + png(filename=paste0(mapdir,"/",var.name.file,"_Mean_",domain,"_Anomalies_of_WT_", WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + if(!europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide && map.only) title(paste0(WTs.type10.name[wt],period.name[p]),cex.main=3.5,outer=T) + + dev.off() + } + +} + + +index=6 +wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=FALSE # put TRUE to show only a zoom over Europe; false otherwise + +if(!slide) {n.box <- 10; col.cex=1; my.title <- WTs.type10.name[wt]} else {n.box <- 5; col.cex=3; my.title <- ""} +domain <- ifelse(europe, "European", "World") + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/StDev_",domain,"_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + + if(!europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], 
filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T) + + dev.off() +} + +#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") diff --git a/old/WT_drivers_v3.R b/old/WT_drivers_v3.R new file mode 100644 index 0000000000000000000000000000000000000000..e0d12efc0bf79fc71b354116a0cf8d2bf419e345 --- /dev/null +++ b/old/WT_drivers_v3.R @@ -0,0 +1,626 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +library(s2dverification) # for the function Load() +library(ff) + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path for resampled MSLP + +workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir="/scratch/Earth/ncortesi/RESILIENCE/WT_maps" # output dir with seasonal and yearly maps + +ERAint <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint2 <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_s0-12h/$VAR_NAME$_$YEAR$$MONTH$.nc') # only for prec +JRA55 <- list(path = '/esnas/recon/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc') + +psl.rean=ERAint # choose a daily reanalysis dataset for MSLP data +psl.rean.name="Era-Interim" + +var.rean=ERAint2 #ERAint # daily reanalysis dataset used for the var data; it can have a different resolution of the MSLP dataset +var.name='tas' #'prlr' #'tas' #'sfcWind' # any daily variable we want to find if it is WTs-driven +var.name.file='Temperature' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's filename + +year.start=1979 # starting year of the MSLP daily data (from the 1st of january) +year.end=2013 #2014 # ending year of the MSLP daily data (up to the 31 of December) + +partial.end=FALSE # put TRUE if the last year ('year.end') has not all the yearly data but stop before December the 31th; in this case, must also specify the variable below +n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE) I.e: data for 2015 doesn't have December, so it has 365-31=334 days + +########################################################################################## + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +year.tot <- year.end - year.start + 1 + +periods=c(1:17) # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values +n.periods <- length(periods) + +var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), 
storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only + + + + + + + + + + + + + + + + + + + + + + + + + +# Load just 1 day of var data to detect the number of latitude and longitude points; +#var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var.lat <- round(var$lat,3) +var.lon <- round(var$lon,3) +var.n.lat <- length(var.lat) # number of latitude values of var +var.n.lon <- length(var.lon) +var.lon.pos <- ifelse(min(var.lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +### Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications +### we must exlude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +###MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) +#MSLP <- Load(var = 'psl', exp = NULL, obs = list(psl.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +#psl.n.lat <- length(MSLP$lat) # number of latitude values or MSLP +#psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP +#psl.lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +#psl.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the MSLP reanalysis) +#psl.n.lat.unused.equat <- 20 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. of the reanalysis) +#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note than the eventual point at lat=0 is always included +#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note than the eventual point at lat=0 is always excluded +#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north +#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1) + +# final latitude values used as central points: +#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)] +#psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points +#psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points + +#psl.n.lat.used <- length(psl.lat.used) +#psl.n.lon.used <- length(psl.lon.used) +#psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used + +#if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative long of MSLP to the [0, +360] range +#if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the positive long of MSLP > 180 to the [-180, +180] range + +# exlude var points > +~80 degrees lat and < -80 deg. 
because the Lamb grid was created excluding these points: +var.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the var reanalysis) +var.n.lat.unused.equat <- 17 # numb. of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. of the reanalysis) + +var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note than the eventual point at lat=0 is always included +var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note than the eventual point at lat=0 is always excluded +var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north +var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1) +var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south) + +#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points + +var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)] +var.lon.used <- var.lon # longitude values used as central points +var.n.lat.used <- length(var.lat.used) +var.n.lon.used <- length(var.lon.used) +var.n.grid.points <- var.n.lat.used * var.n.lon.used +#var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map +var.lon.used.bis <- c(var.lon.used[c(ceiling(var.n.lon.used/2):var.n.lon.used)]-360,var.lon.used[c(1:(ceiling(var.n.lon.used/2)-1))]) # to put Europe in the middle of the map + +#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used) +pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used) +#var.lat[pos.var.lat.unused] + + +### Map of the central points for Europe: +#void <- array(NA,c(var.n.lat.used, var.n.lon.used)) +###void.bis <- void[,c(257:512,1:256)] # when using the var dataset with res 256x512 +#void.bis <- void[,c(ceiling(var.n.lon.used/2):var.n.lon.used,1:(ceiling(var.n.lon.used/2)-1))] +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +###PlotEquiMap(void.bis[1:215,], var.lon.used.bis, var.lat[1:215], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#par(oma=c(1,1,1,1)) +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#PlotEquiMap(void.bis[25:90, 220:317], var.lon.used.bis[220:317], var.lat[25:90], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#my.parcelas<-data.frame(name="", lat=rep(var.lat.used,var.n.lon.used), long=rep(var.lon.used.bis, each=var.n.lat.used), pop=0, capital=0, stringsAsFactors=F) +#map.cities(my.parcelas, pch=3, cex=.5,col=c("gray40")) # add the 4 points of the 4 parcelas +#write.table(cbind(lat=rep(var.lat.used,var.n.lon.used), lon=rep(var.lon.used, each=var.n.lat.used)),file=paste0(workdir,"/list_lat_lon.txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + +# for each var grid point used, find the lat/lon position of the 
nearest central point of the WT classification:
+closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point
+
+i<-0; vlat.pos <- 0
+for (vlat in var.lat.used) {
+  vlat.pos <- vlat.pos+1
+
+  vlon.pos <- 0
+  for(vlon in var.lon.used) {
+    i <- i+1
+    cat(paste0("Point #",i,"/", var.n.grid.points), '\r')
+    vlon.pos <- vlon.pos+1
+
+    closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used)
+    closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]]
+    closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]]
+
+    #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug
+
+  }
+}
+
+save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl_",var.name,".RData")) # save it the first time the script is run
+load(file=paste0(workdir,"/closer_psl_",var.name,".RData")) # load it if already saved
+
+#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug
+#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug
+
+# for each var grid point used, load 1 year of daily var and wt data at a time, and assign it to the closest mslp central point
+# (you can't open all the data at once because it takes too much memory, but you can open 1 year at a time):
+chunk <- split.array(dimensions=c(n.days.in.a.yearly.period(year.start,year.end), var.n.lat.used, var.n.lon.used), along=3)
+save(chunk, file=paste0(workdir,"/chunk_",var.name,".RData")) # save the chunk to retrieve it later, in case you run the next loop for 1 year only ('year.end' changes, so 'chunk' changes too)
+load(paste0(workdir,"/chunk_",var.name,".RData"))
+
+i=0
+for(y in year.start:year.end){
+  #y<-1981 # for the debug
+  i<-i+1
+  print(paste0("Year #",i,"/", year.tot))
+
+  n.days <- n.days.in.a.year(y)
+  if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days
+
+  var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only
+  #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE)
+
+  # open the ff binary files with all the WT classifications for that year:
+  for(f in 1:10){
+    WTs.file <- paste0(workdir,"/",y,"_RData/WTs",f,"_",y)
+    if(file.exists(paste0(WTs.file,".RData"))) ffload(file=WTs.file, overwrite=TRUE)
+  }
+
+  var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points
+
+  j<-0
+  vlat.pos <- 0
+  for (vlat in var.lat.used) {
+    #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug
+    j<-j+1
+    vlat.pos <- vlat.pos+1
+    cat('Latitude ',j,'/',var.n.lat.used,'\r')
+
+    vlon.pos <- 0
+    for(vlon in var.lon.used) {
+      vlon.pos <- vlon.pos+1
+
+      var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)]
+
+      #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL)
+
+      # load the WT classification (variable WTs) for that year, created with WT_vX.R:
+      #load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData"))
+      open(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))
+      #open(eval(parse(text=gsub("-","m",paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))))
+
+      # interval of days belonging to year y, counting from year.start:
+      #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 1:n.days.in.a.year(y)
+      WTs <- as.ram(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))
+      wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of year y
+      close(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))
+      rm(WTs)
+    }
+
+  }
+
+  #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug
+
+  #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE)
+  #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE)
+
+  #year=4000
+  #ffload(file=paste0(workdir,"/WTs_",year))
+  #latc <- 70
+  #lonc <- 0
+  #open(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open(), so eval(parse(text=...)) must be used instead
+  #close(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with close() either
+
+  # instead of saving only 1 file for the whole spatial domain, save 1 file for each chunk c, to be able to load them faster in the following step:
+  if(!dir.exists(paste0(workdir,"/",var.name,"_chunk"))) dir.create(paste0(workdir,"/",var.name,"_chunk"))
+  if(!dir.exists(paste0(workdir,"/",var.name,"_wt_chunk"))) dir.create(paste0(workdir,"/",var.name,"_wt_chunk")) # same subdir used by the save() of wt.serie.year.chunk below
+
+  for(c in 1:chunk$n.chunk){
+    var.serie.year.chunk <- var.serie[,chunk$int[[c]],] # format: [lat, lon, day]
+    save(var.serie.year.chunk, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE)
+
+    wt.serie.year.chunk <- wt.serie[,chunk$int[[c]],]
+    save(wt.serie.year.chunk, file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE)
+    # n.wt10<-apply(wt.serie.year.chunk,c(1,2),function(x)length(which(x==18))) # for the debug
+    # edit(n.wt10) # for the debug
+  }
+
+} # close for on y
+
+#rm(closer.psl.lat, closer.psl.lon, var)
+
+
+# compute the climatology of var for each period and its daily anomalies for each chunk and period:
+var.clim <- array(NA, c(n.periods, var.n.lat.used, var.n.lon.used))
+
+n.days.tot <- n.days.in.a.yearly.period(year.start,year.end)
+#n.days.tot <- 365 # for the debug
+
+days.period <- n.days.period <- list()
+for(p in periods){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning, introduced only to be able to execute the command above
+  n.days.period[[p]] <- length(days.period[[p]])
+}
+
+for(c in 1:chunk$n.chunk){
+  #c=1 # for the debug
+  cat(paste0("Computing chunk n. ", c,"/", chunk$n.chunk),'\r')
+
+  if(c == chunk$n.chunk) {
+    var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot))
+  } else {
+    var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot))
+  }
+
+  for(y in year.start:year.end){
+    load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk
+    int.days.year <- seq.days.in.a.future.year(year.start,y)
+
+    var.serie.chunk[,,int.days.year] <- var.serie.year.chunk
+    rm(var.serie.year.chunk)
+  }
+
+  for(p in periods){
+    var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] # select only the days in the chosen period
+
+    var.clim.chunk <- apply(var.serie.chunk.period,c(1,2),mean, na.rm=T)
+
+    var.anom.chunk.period <- var.serie.chunk.period - InsertDim(var.clim.chunk, 3, n.days.period[[p]])
+    var.clim[p,,chunk$int[[c]]] <- var.clim.chunk
+
+    # save the var anomaly, one file for each chunk and period:
+    assign(paste0("var.anom.chunk.period",p), var.anom.chunk.period)
+
+    # a plain save(object, file=...) does not work here because the variable name is built at runtime, so save() is called through do.call():
+    do.call(save, list(paste0("var.anom.chunk.period",p), file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE)) # this syntax is needed to save an RData whose variable name is given as a string
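+    # Sketch (an assumption, not code from this script): base R's save() can also take the
+    # object name as a string directly through its 'list' argument, avoiding do.call():
+    #   nm <- paste0("var.anom.chunk.period", p)
+    #   save(list = nm, file = "anomaly_example.RData", compress = FALSE) # placeholder filename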
+ + rm(var.anom.chunk.period,var.serie.chunk.period) + do.call(rm, list(paste0("var.anom.chunk.period",p))) + } + + rm(var.serie.chunk) +} + +save(var.clim, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save var.clim +load(paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +# do the same as above, but for the wt: +wts <- c(1:10) # weather type numbers inside wt.serie +n.wts<-length(wts) +n.wt <- var.wt.sum <- var.wt.mean.anom <- var.wt.sd.anom <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used)) +#vlat.mat <- vlon.mat <- array(NA, c(var.n.lat.used, var.n.lon.used)) # for the debug + +for(c in 1:chunk$n.chunk){ + #s=1;y=year.start # for the debug + cat(paste0("Computing chunk n. ", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size.last)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load wt.serie.year.chunk + + wt.serie.year.chunk[which(wt.serie.year.chunk==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days.year <- seq.days.in.a.future.year(year.start,y) + + wt.serie.chunk[,,int.days.year] <- wt.serie.year.chunk #[,chunk$int[[c]],] + rm(wt.serie.year.chunk) + } + + for(p in periods){ + # load var.anom.chunk.period + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData")) + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] + wt.serie.chunk.period <- wt.serie.chunk[,,days.period[[p]]] + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #chunk$int[[c]]-1 + for(vlon in var.lon.used[chunk$int[[c]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.chunk.period[vlat.pos,vlon.pos,]==wt) + var.anom.chunk.period <- get(paste0("var.anom.chunk.period",p)) + + n.wt.chunk[p, wt, vlat.pos, vlon.pos] <- length(pos.wt) + var.wt.sum.chunk[p, wt, vlat.pos, vlon.pos] <- sum(var.serie.chunk.period[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.chunk[p, wt, vlat.pos, vlon.pos] <- mean(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.chunk[p, wt, vlat.pos, vlon.pos] <- 
sd(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt])
+
+          rm(pos.wt, var.anom.chunk.period)
+        }
+      }
+    }
+
+    n.wt[p,,,chunk$int[[c]]] <- n.wt.chunk[p,,,]
+    var.wt.sum[p,,,chunk$int[[c]]] <- var.wt.sum.chunk[p,,,]
+    var.wt.mean.anom[p,,,chunk$int[[c]]] <- var.wt.mean.anom.chunk[p,,,]
+    var.wt.sd.anom[p,,,chunk$int[[c]]] <- var.wt.sd.anom.chunk[p,,,]
+  } # close for on p
+}
+
+save(n.wt, var.wt.sum, var.wt.mean.anom, var.wt.sd.anom, file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save the full var.wt.sd.anom array (not the per-chunk var.wt.sd.anom.chunk, which only holds the last chunk)
+load(file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData"))
+
+#if(var.name=='prlr'){ # if precipitation is not in m/s (as in ERA-Interim) but in kg/m2/s, multiply by 86400 to convert it to mm/day
+#  var.clim <- var.clim * 86400
+#  var.wt.sum <- var.wt.sum * 86400
+#  var.wt.mean.anom <- var.wt.mean.anom * 86400
+#  var.wt.sd.anom <- var.wt.sd.anom * 86400
+#}
+
+var.wt.mean.contrib <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used))
+
+var.wt.mean <- var.wt.sum/n.wt # average value of var associated to a WT
+#n.wt <- 100*n.wt/n.days.tot # convert to yearly frequency in %
+for(p in periods) n.wt[p,,,] <- 100*n.wt[p,,,]/n.days.period[[p]] # convert the counts to a frequency in % of the days of the period
+for(p in periods) var.wt.mean.contrib[p,,,] <- 100*var.wt.sum[p,,,]/(InsertDim(var.clim[p,,],1,n.wts)*n.days.period[[p]]) # convert the sum to a % contribution
+
+var.clim.NA <- array(NA, c(n.periods, var.n.lat, var.n.lon.used))
+n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.periods, n.wts, var.n.lat, var.n.lon.used))
+
+var.clim.NA[,pos.var.lat.used,] <- var.clim
+n.wt.NA[,,pos.var.lat.used,] <- n.wt
+var.wt.mean.NA[,,pos.var.lat.used,] <- var.wt.mean
+var.wt.sum.NA[,,pos.var.lat.used,] <- var.wt.sum
+var.wt.mean.anom.NA[,,pos.var.lat.used,] <- var.wt.mean.anom
+var.wt.mean.contrib.NA[,,pos.var.lat.used,] <- var.wt.mean.contrib
+var.wt.sd.anom.NA[,,pos.var.lat.used,] <- var.wt.sd.anom
+
+# remove the equatorial area from visualization:
+n.wt.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.NA[,,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,,var.pos.lat.unused.eq,] <- NA
+var.wt.mean.anom.NA[,,var.pos.lat.unused.eq,] <- var.wt.sd.anom.NA[,,var.pos.lat.unused.eq,] <- NA
+
+# move Europe to the center of the maps:
+p1 <- ceiling(var.n.lon.used/2)-1
+p2 <- ceiling(var.n.lon.used/2)
+p3 <- var.n.lon.used
+
+var.clim.NA.bis <- var.clim.NA[,,c(p2:p3,1:p1)]
+n.wt.NA.bis <- n.wt.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.NA.bis <- var.wt.mean.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,,c(p2:p3,1:p1)]
+var.wt.sd.anom.NA.bis <- var.wt.sd.anom.NA[,,,c(p2:p3,1:p1)]
+gc()
+
+# Map intervals and colors (when an element is assigned more than once, only the last
+# assignment is active; the others are kept as alternatives for the other variables):
+my.brks <- list()
+
+#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology
+my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+
+#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT
+my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT
+my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT
+my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var
+my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT for wind speed
+my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT for temperature
+my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation
+
+my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies of a WT
+
+my.cols <- list()
+#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1)
+#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-green-white-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # white-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # white-green-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-white-red colors
+#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-green-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # white-cyan-gold-red colors
+my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colors for wind speed
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+
+#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors
+my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue-yellow-red colors
+my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors
+#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # white-cyan-blue-purple colors
+my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue-yellow-red colors
+
+my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1))
+my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)]
+my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue-yellow-red colors for temperature
+my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for precipitation
+my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)]
+#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1)
+
+my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue-yellow-red colors
+
+
+# Create and save maps:
+
+index=1
+#p=17 # for the debug
+
+for(p in periods){
+  png(filename=paste0(mapdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
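+  # note: the layout() above stacks the map panel (rows 1 to 10) over a thin strip
+  # (row 11) where the horizontal ColorBar legend is drawn after PlotEquiMap()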
#layout.show(2)
+  #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F)
+  # change the longitudes to the format [-180,180] to display Europe in the middle of the map:
+  #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+
+  # for temperature, subtract 273.15 from var.clim.NA.bis[p,,] to convert it to degrees Celsius:
+  PlotEquiMap(var.clim.NA.bis[p,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
+
+index=2
+#p=17 # for the debug
+wt=6
+
+for(p in periods){
+  png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", WTs.type10[wt],period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+  #PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  #PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  PlotEquiMap(var.wt.mean.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  #PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1)
+
+  dev.off()
+}
+
+
+index=3
+p=17 # for the debug
+wt=6
+
+png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700)
+
+layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F)
+#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F)
+#n.wt.NA[wt,50:55,490:512]
+#ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1)
+#n.wt10<-apply(wt.serie.,c(1,2),function(x)length(which(x==18))) # for the debug
+#edit(n.wt.NA[10,,]) # for the debug
+#layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+PlotEquiMap(n.wt.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1)
+
+dev.off()
+
+
+index=4
+p=17
+wt=6 # for the debug
+
+for(wt in 1:n.wts){
+  png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+  PlotEquiMap(var.wt.mean.contrib.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]],
+              drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt], sizetit=1)
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1)
+
+  dev.off()
+}
+
+
+# Average anomalies associated to a WT:
+# WARNING: for temperature, you must subtract 273 from var.wt.mean.anom.NA.bis[p,wt,,] to convert the anomalies to degrees;
+# for precipitation, you must multiply it by 86400
+index=5
+#p=17 # for the debug
+#wt=6 # for the debug
+slide=FALSE # set TRUE to print maps for a slide, with bigger titles and legends; FALSE otherwise
+europe=TRUE # set TRUE to show only a zoom over Europe; FALSE otherwise
+map.only=FALSE # set TRUE to print only the image, with no legend or title (for posters)
+
+domain <- ifelse(europe, "European", "World")
+
+for(p in periods){
+  for(wt in 1:n.wts){
+    if(!slide) {n.box <- 10; col.cex=1; my.title <- paste(WTs.type10.name[wt],period.name[p])} else {n.box <- 5; col.cex=3; my.title <- ""}
+    if(map.only) my.title <- ""
+
+    png(filename=paste0(mapdir,"/",var.name.file,"_Mean_",domain,"_Anomalies_of_WT_", WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700)
+
+    layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE))
+    if(!europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]],
+                            cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+    if(europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]],
+                           cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+    ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex)
+    if(slide && map.only) title(paste0(WTs.type10.name[wt],period.name[p]),cex.main=3.5,outer=T)
+
+    dev.off()
+  }
+
+}
+
+
+index=6
+wt=6 # for the debug
+p=17 # period to plot (without this, p would keep the last value left by the loop above)
+slide=FALSE # set TRUE to print maps for a slide, with bigger titles and legends; FALSE otherwise
+europe=FALSE # set TRUE to show only a zoom over Europe; FALSE otherwise
+
+if(!slide) {n.box <- 10; col.cex=1} else {n.box <- 5; col.cex=3}
+domain <- ifelse(europe, "European", "World")
+
+for(wt in 1:n.wts){
+  my.title <- ifelse(slide, "", WTs.type10.name[wt]) # update the title with the current WT
+  png(filename=paste0(mapdir,"/StDev_",domain,"_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE))
+
+  if(!europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]],
+                          cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+  if(europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]],
+                         cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex)
+  if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T)
+
+  dev.off()
+}
+
+#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+
+save.image(file=paste0(workdir,"/Weather_types",var.name,"_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE)
diff --git a/old/WT_drivers_v3.R~ b/old/WT_drivers_v3.R~
new file mode 100644
index 0000000000000000000000000000000000000000..ab66d203b0ce5d67113b17337c8819e11ef9473d
--- /dev/null
+++ b/old/WT_drivers_v3.R~
@@ -0,0 +1,601 @@
+##########################################################################################
+#               Relationship between WTs and a chosen climate variable                  #
+##########################################################################################
+
+library(s2dverification) # for the function Load()
+library(ff)
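+# Note on the ff objects used below (a minimal sketch under assumptions, not code from
+# this repository): the WT classifications are disk-backed ff vectors saved with ffsave(),
+# so single grid points can be read without loading the whole classification in RAM:
+#   ffload(file = "WTs1_1984")     # restores the ff objects from WTs1_1984.ffData/.RData
+#   open(WTs_lat_40_lon_0)         # attach the on-disk data of one grid point (illustrative name)
+#   x <- as.ram(WTs_lat_40_lon_0)  # copy its daily WT series into an ordinary R vector
+#   close(WTs_lat_40_lon_0)        # release the file handle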
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path for resampled MSLP
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir="/scratch/Earth/ncortesi/RESILIENCE/WT_maps" # output dir with seasonal and yearly maps
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint2 <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_s0-12h/$VAR_NAME$_$YEAR$$MONTH$.nc') # only for prec
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+psl.rean=ERAint # choose a daily reanalysis dataset for MSLP data
+psl.rean.name="Era-Interim"
+
+var.rean=ERAint2 #ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+var.name='prlr' #'tas' #'sfcWind' # any daily variable we want to test for being WT-driven
+var.name.file='Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the output map's filename
+
+year.start=1979 # starting year of the MSLP daily data (from the 1st of January)
+year.end=2013 #2014 # ending year of the MSLP daily data (up to the 31st of December)
+
+partial.end=FALSE # set TRUE if the last year ('year.end') is incomplete and stops before December 31st; in this case, also set the variable below
+n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE). E.g.: data for 2015 lacks December, so it has 365-31=334 days
+
+##########################################################################################
+
+WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A")
+WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)",
+                   "Northerly (N)","Cyclonic (C)","Anticyclonic (A)")
+year.tot <- year.end - year.start + 1
+
+periods=c(1:17) # identify monthly (from 1=Jan to 12=Dec), seasonal (from 13=Winter to 16=Autumn) and yearly=17 values
+n.periods <- length(periods)
+
+# Load just 1 day of var data to detect the number of latitude and longitude points:
+#var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+var.lat <- round(var$lat,3)
+var.lon <- round(var$lon,3)
+var.n.lat <- length(var.lat) # number of latitude values of var
+var.n.lon <- length(var.lon)
+var.lon.pos <- min(var.lon) >= 0 # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+# Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications;
+# we must exclude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points:
+#MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+MSLP <- Load(var = 'psl', exp = NULL, obs = list(psl.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+psl.n.lat <- length(MSLP$lat) # number of latitude values of MSLP
+psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP
+psl.lon.pos <- min(MSLP$lon) >= 0 # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+psl.n.lat.unused.poles <- 20 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the MSLP reanalysis)
+#psl.n.lat.unused.equat <- 20 # number of unused latitude values on each side of the equator (10 degrees N/S per side must be excluded, so it depends on the spatial resolution of the reanalysis)
+#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note that the point at lat=0, if present, is always included
+#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note that the point at lat=0, if present, is always excluded
+#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north
+#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1)
+
+# final latitude values used as central points:
+#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)]
+psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points
+psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points
+
+psl.n.lat.used <- length(psl.lat.used)
+psl.n.lon.used <- length(psl.lon.used)
+psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used
+
+if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative longitudes of MSLP to the [0, +360] range
+if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the MSLP longitudes > 180 to the [-180, +180] range
+
+# exclude var points > ~+80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points:
+var.n.lat.unused.poles <- 20 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the var reanalysis)
+var.n.lat.unused.equat <- 20 # number of unused latitude values on each side of the equator (10 degrees N/S per side must be excluded, so it depends on the spatial resolution of the reanalysis)
+
+var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note that the point at lat=0, if present, is always included
+var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note that the point at lat=0, if present, is always excluded
+var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north
+var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1)
+var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)
+
+#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points
+
+var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)]
+var.lon.used <- var.lon # longitude values used as central points
+var.n.lat.used <- length(var.lat.used)
+var.n.lon.used <- length(var.lon.used)
+var.n.grid.points <- var.n.lat.used * var.n.lon.used
+#var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map
+var.lon.used.bis <- c(var.lon.used[c(ceiling(var.n.lon.used/2):var.n.lon.used)]-360,var.lon.used[c(1:(ceiling(var.n.lon.used/2)-1))]) # to put Europe in the middle of the map
+
+#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used)
+pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used)
+#var.lat[pos.var.lat.unused]
+
+
+# Map of the central points for Europe:
+void <- array(NA,c(var.n.lat.used, var.n.lon.used))
+#void.bis <- void[,c(257:512,1:256)] # when using the var dataset with res 256x512
+void.bis <- void[,c(ceiling(var.n.lon.used/2):var.n.lon.used,1:(ceiling(var.n.lon.used/2)-1))]
+#PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug
+#PlotEquiMap(void.bis[1:215,], var.lon.used.bis, var.lat[1:215], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug
+par(oma=c(1,1,1,1))
+#PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug
+PlotEquiMap(void.bis[25:90, 220:317], var.lon.used.bis[220:317], var.lat[25:90], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug
+my.parcelas<-data.frame(name="", lat=rep(var.lat.used,var.n.lon.used), long=rep(var.lon.used.bis, each=var.n.lat.used), pop=0, capital=0, stringsAsFactors=F)
+map.cities(my.parcelas, pch=3, cex=.5,col=c("gray40")) # overlay the central points as small gray crosses
+write.table(cbind(lat=rep(var.lat.used,var.n.lon.used), lon=rep(var.lon.used, each=var.n.lat.used)),file=paste0(workdir,"/list_lat_lon.txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+
+
+# for each var grid point used, find the lat/lon position of the nearest central point of the WT classification:
+closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point
+
+i<-0; vlat.pos <- 0
+for (vlat in var.lat.used) {
+  vlat.pos <- vlat.pos+1
+
+  vlon.pos <- 0
+  for(vlon in var.lon.used) {
+    i <- i+1
+    cat(paste0("Point #",i,"/", var.n.grid.points), '\r')
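+    # nearest() is defined in Rfunctions.R (not shown in this diff); a minimal sketch of
+    # the behaviour assumed here -- return c(lat index, lon index) of the closest grid
+    # point, treating longitude as periodic:
+    #   nearest <- function(lat, lon, grid.lat, grid.lon){
+    #     d.lon <- pmin(abs(grid.lon - lon), 360 - abs(grid.lon - lon))
+    #     c(which.min(abs(grid.lat - lat)), which.min(d.lon))
+    #   }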
vlon.pos<-vlon.pos+1 + + closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used) + closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]] + closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]] + + #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug + + } +} + +save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl_",var.name,".RData")) # save it if it is the first time +load(file=paste0(workdir,"/closer_psl_",var.name,".RData")) # load it if already saved + +#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug +#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug + +# for each var grid point used, load 1 year of daily var and wt data at time, and assign it to the closer mslp central point: +# (you can't open all data at once because it weights too much, but you can open 1 year at time) +chunk <- split.array(dimensions=c(n.days.in.a.yearly.period(year.start,year.end), var.n.lat.used, var.n.lon.used), along=3) +save(chunk, file=paste0(workdir,"/chunk_",var.name,".RData")) # save the chunk to retrieve it later or you run the next loop for 1 year only ('year.end' changes, so 'chunk' changes too) +load(paste0(workdir,"/chunk_",var.name,".RData")) + +i=0 +for(y in year.start:year.end){ + #y<-1981 # for the debug + i<-i+1 + print(paste0("Year #",i,"/", year.tot)) + + n.days <- n.days.in.a.year(y) + if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days + + var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only + #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE) + + # open the ff binary files with all the WT classification for that year: + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs1_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs1_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs2_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs2_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs3_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs3_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs4_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs4_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs5_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs5_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs6_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs6_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs7_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs7_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs8_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs8_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs9_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs9_",y)), overwrite=TRUE) + 
if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs10_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs10_",y)), overwrite=TRUE) + + var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points + + j<-0 + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug + j<-j+1 + vlat.pos <- vlat.pos+1 + cat('Latitude ',j,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 + for(vlon in var.lon.used) { + vlon.pos <- vlon.pos+1 + + var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)] + + #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL) + + # load WTs classifications (variable WTs) for that year created with WT_vX.R: + #load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData")) + open(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + #open(eval(parse(text=gsub("-","m",paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))) + + + # interval of days belonging to the year y, but starting to count from the year year.begin: + #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 1:n.days.in.a.year(y) + WTs <- as.ram(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of the year y + close(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + rm(WTs) + } + + } + + #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug + + #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE) + #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE) + + #year=4000 + #ffload(file=paste0(workdir,"/WTs_",year)) + #latc <- 70 + #lonc <- 0 + #open(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must + #close(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must use eval(parse(text=...))) + + + # instead of saving only 1 file for all the spatial domain, save 1 file for each chunk c: (to be able to load them faster in the following step) + if(!dir.exists(paste0(workdir,"/",var.name,"_chunk"))) dir.create(paste0(workdir,"/",var.name,"_chunk")) + if(!dir.exists(paste0(workdir,"/wt_chunk"))) dir.create(paste0(workdir,"/wt_chunk")) + + for(c in 1:chunk$n.chunk){ + var.serie.year.chunk<-var.serie[,chunk$int[[c]],] # format: [lat, lon, day] + save(var.serie.year.chunk, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE) + + wt.serie.year.chunk <- wt.serie[,chunk$int[[c]],] + save(wt.serie.year.chunk, file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE) + # 
n.wt10<-apply(wt.serie.year.chunk,c(1,2),function(x)length(which(x==18))) # for the debug + # edit(n.wt10) # for the debug + } + +} # close for on y + +#rm(closer.psl.lat, closer.psl.lon, var) + + +# compute the climatology of var for each period and its daily anomalies for each chunk and period: +var.clim <- array(NA, c(n.periods, var.n.lat.used, var.n.lon.used)) + +n.days.tot <- n.days.in.a.yearly.period(year.start,year.end) +#n.days.tot <- 365 # for the debug + +days.period <- n.days.period <- list() +for(p in periods){ + days.period[[p]] <- NA + for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + days.period[[p]] <- days.period[[p]][-1] # remove the NA at the begin that was introduced only to be able to execture the above command + n.days.period[[p]] <- length(days.period[[p]]) +} + +for(c in 1:chunk$n.chunk){ + #c=1 # for the debug + cat(paste0("Computing chunk n. ", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + for(p in periods){ + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] # select only the days in the chosen period + + var.clim.chunk <- apply(var.serie.chunk.period,c(1,2),mean, na.rm=T) + + var.anom.chunk.period <- var.serie.chunk.period - InsertDim(var.clim.chunk, 3, n.days.period[[p]]) + var.clim[p,,chunk$int[[c]]] <- var.clim.chunk + + # save the var anomaly, one file for each chunk and period: + assign(paste0("var.anom.chunk.period",p), var.anom.chunk.period) + + # save(, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE) + # save not working in this case, use do.call below: + do.call(save, list(paste0("var.anom.chunk.period",p), file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE)) # you must use this syntax when saving an Rdata with the variable name given by a string!!! + + rm(var.anom.chunk.period,var.serie.chunk.period) + do.call(rm, list(paste0("var.anom.chunk.period",p))) + } + + rm(var.serie.chunk) +} + +save(var.clim, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save var.clim +load(paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +# do the same as above, but for the wt: +wts <- c(1:10) # weather type numbers inside wt.serie +n.wts<-length(wts) +n.wt <- var.wt.sum <- var.wt.mean.anom <- var.wt.sd.anom <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used)) +#vlat.mat <- vlon.mat <- array(NA, c(var.n.lat.used, var.n.lon.used)) # for the debug + +for(c in 1:chunk$n.chunk){ + #s=1;y=year.start # for the debug + cat(paste0("Computing chunk n. 
", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size.last)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load wt.serie.year.chunk + + wt.serie.year.chunk[which(wt.serie.year.chunk==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days.year <- seq.days.in.a.future.year(year.start,y) + + wt.serie.chunk[,,int.days.year] <- wt.serie.year.chunk #[,chunk$int[[c]],] + rm(wt.serie.year.chunk) + } + + for(p in periods){ + # load var.anom.chunk.period + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData")) + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] + wt.serie.chunk.period <- wt.serie.chunk[,,days.period[[p]]] + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #chunk$int[[c]]-1 + for(vlon in var.lon.used[chunk$int[[c]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.chunk.period[vlat.pos,vlon.pos,]==wt) + var.anom.chunk.period <- get(paste0("var.anom.chunk.period",p)) + + n.wt.chunk[p, wt, vlat.pos, vlon.pos] <- length(pos.wt) + var.wt.sum.chunk[p, wt, vlat.pos, vlon.pos] <- sum(var.serie.chunk.period[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.chunk[p, wt, vlat.pos, vlon.pos] <- mean(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.chunk[p, wt, vlat.pos, vlon.pos] <- sd(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + + rm(pos.wt, var.anom.chunk.period) + } + } + } + + n.wt[p,,,chunk$int[[c]]] <- n.wt.chunk[p,,,] + var.wt.sum[p,,,chunk$int[[c]]] <- var.wt.sum.chunk[p,,,] + var.wt.mean.anom[p,,,chunk$int[[c]]] <- var.wt.mean.anom.chunk[p,,,] + var.wt.sd.anom[p,,,chunk$int[[c]]] <- var.wt.sd.anom.chunk[p,,,] + } # close for on p +} + +save(n.wt, var.wt.sum, var.wt.mean.anom, var.wt.sd.anom.chunk, file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) +load(file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +#if(var.name=='prlr'){ # if it is not in m/s (as in ERA-Interim) but it is in kg/m2/s, you must multiply for 86400 to convert to mm/day +# var.clim <- var.clim * 86400 +# var.wt.sum <- var.wt.sum * 86400 
+# var.wt.mean.anom <- var.wt.mean.anom * 86400 +# var.wt.sd.anom <- var.wt.sd.anom * 86400 +#} + +var.wt.mean.contrib <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used)) + +var.wt.mean <- var.wt.sum/n.wt # average value of var associated to a WT +#n.wt <- 100*n.wt/n.days.tot # convert to yearly frequency in % +for(p in periods) n.wt[p,,,] <- 100*n.wt[p,,,]/n.days.period[[p]] # convert to yearly frequency in % +for(p in periods) var.wt.mean.contrib[p,,,] <- 100*var.wt.sum[p,,,]/(InsertDim(var.clim[p,,],1,n.wts)*n.days.period[[p]]) # convert the sum to a % contribution + +var.clim.NA <- array(NA, c(n.periods, var.n.lat, var.n.lon.used)) +n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.periods, n.wts, var.n.lat, var.n.lon.used)) + +var.clim.NA[,pos.var.lat.used,] <- var.clim +n.wt.NA[,,pos.var.lat.used,] <- n.wt +var.wt.mean.NA[,,pos.var.lat.used,] <- var.wt.mean +var.wt.sum.NA[,,pos.var.lat.used,] <- var.wt.sum +var.wt.mean.anom.NA[,,pos.var.lat.used,] <- var.wt.mean.anom +var.wt.mean.contrib.NA[,,pos.var.lat.used,] <- var.wt.mean.contrib +var.wt.sd.anom.NA[,,pos.var.lat.used,] <- var.wt.sd.anom + +# remove the equatorial area from visualization: +n.wt.NA[,,var.pos.lat.unused.eq,]<- var.wt.mean.NA[,,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,,var.pos.lat.unused.eq,] <- NA +var.wt.mean.anom.NA[,,var.pos.lat.unused.eq,] <- var.wt.sd.anom.NA[,,var.pos.lat.unused.eq,] <- NA + +# move Europe to the center of the maps: +p1 <- ceiling(var.n.lon.used/2)-1 +p2 <- ceiling(var.n.lon.used/2) +p3 <- var.n.lon.used + +var.clim.NA.bis <- var.clim.NA[,,c(p2:p3,1:p1)] +n.wt.NA.bis <- n.wt.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.NA.bis <- var.wt.mean.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,,c(p2:p3,1:p1)] +var.wt.sd.anom.NA.bis <- var.wt.sd.anom.NA[,,,c(p2:p3,1:p1)] +gc() + +# Map intervals and colors: +my.brks <- list() + +#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology +my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology + +#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT +my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT +my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT +my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var +my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT for wind speed +my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT for temperature +my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation + + +my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies pf a WT + +my.cols <- list() +#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1) +#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-green-white-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- 
colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors + +my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + + +# Create and save maps: + +index=1 +#p=17 # for the debug + +for(p in periods){ + png(filename=paste0(mapdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude in format -180,180 to displey europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + PlotEquiMap(var.clim.NA.bis[p,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + +index=2 +#p=17 # for the debug +wt=6 + +for(p in periods){ + png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", 
WTs.type10[wt],period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + PlotEquiMap(var.wt.mean.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +index=3 +p=17 # for the debug +wt=6 + +png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#n.wt.NA[wt,50:55,490:512] +#ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) +#n.wt10<-apply(wt.serie.,c(1,2),function(x)length(which(x==18))) # for the debug +#edit(n.wt.NA[10,,]) # for the debug +#layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +PlotEquiMap(n.wt.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) + +dev.off() + + +index=4 +p=17 +wt=6 # for the debug + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(var.wt.mean.contrib.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], + drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt] , sizetit=1) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +# Average anomalies associated to a WT: +# WARNING: for temerature, you must remove -273 after var.wt.mean.anom.NA.bis[p,wt,,] to convert to anomalies in degrees +# for precipitation, you must multiply it for 86400 +index=5 +#p=17 # for the debug +#wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=TRUE # put TRUE to show only a zoom over Europe; false otherwise +map.only=FALSE # put TRUE if you want to print only the image with no legend or title (for posters) + +domain <- ifelse(europe, "European", "World") + +for(p in periods){ + for(wt in 1:n.wts){ + if(!slide) {n.box <- 10; col.cex=1; my.title <- paste(WTs.type10.name[wt],period.name[p])} else {n.box <- 5; col.cex=3; my.title <- ""} + if(map.only) my.title <- "" + + png(filename=paste0(mapdir,"/",var.name.file,"_Mean_",domain,"_Anomalies_of_WT_", WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + if(!europe) 
PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide && map.only) title(paste0(WTs.type10.name[wt],period.name[p]),cex.main=3.5,outer=T) + + dev.off() + } + +} + + + + +index=6 +wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=FALSE # put TRUE to show only a zoom over Europe; false otherwise + +if(!slide) {n.box <- 10; col.cex=1; my.title <- WTs.type10.name[wt]} else {n.box <- 5; col.cex=3; my.title <- ""} +domain <- ifelse(europe, "European", "World") + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/StDev_",domain,"_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + + if(!europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T) + + dev.off() +} + +#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") + +save.image(file=paste0(workdir,"/Weather_types",var.name,"_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) diff --git a/old/WT_drivers_v4.R b/old/WT_drivers_v4.R new file mode 100644 index 0000000000000000000000000000000000000000..246724ee4e0c4b13e15ceaf7e259e7cc5bc13c26 --- /dev/null +++ b/old/WT_drivers_v4.R @@ -0,0 +1,779 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +library(s2dverification) # for the function Load() + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +# directory where to find the WT classification +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT" + +# reanalysis used for daily var data (must be the same as the mslp data): +var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + + # any daily variable: +var.name='tas' #'prlr' #'tas' #'sfcWind' +var.name.file='Temperature' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's filename + +periods=1:17 # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values + +LOESS <- TRUE # climatology filter on/off (if off, daily climatology is computed instead and used to measure daily anomalies) + +index=1 # choose an 
+ +########################################################################################## + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +#year.tot <- year.end - year.start + 1 +n.periods <- length(periods) + + +# load 1 year of WT data to get the info on year.start and year.end +WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) + +year.start <- min(WTs_file1$Year) +year.end <- max(WTs_file1$Year) +n.years <- year.end - year.start + 1 + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=1) + +var.lat <- var$lat +var.lon <- var$lon + +# remove leap days from var to have all arrays with the same dimensions: +cat("Removing leap days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # shift the March-to-December period backwards to drop the 29th of February +} + +rm(var) +gc() + + +# Create and save climatology maps: + +#p=17 # for the debug + +# Map intervals and colors: +my.brks <- list() +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology +my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology + +my.cols <- list() +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors (note: the palette length must follow my.brks[[2]], not my.brks[[1]]) + +for(p in periods){ + png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude to the format -180,180 to display Europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + # Select only days of the chosen month/season: + varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE] + varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE) + + mod.index <- ifelse(index == 1, -273.15, 0) # for temperature, subtract 273.15 to convert from Kelvin to degrees Celsius; no offset for the other variables + PlotEquiMap(varPeriodMean[1,1,,]+mod.index, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +}
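+ +# NOTE: PlotEquiMap() and ColorBar() pair each colour with one interval between consecutive breaks, so length(cols) must equal length(brks)-1; +# a quick sanity check on the palettes defined above: +stopifnot(length(my.cols[[1]]) == length(my.brks[[1]])-1) +stopifnot(length(my.cols[[2]]) == length(my.brks[[2]])-1)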
+ + +# convert var into daily anomalies: +cat("Calculating anomalies. Please wait...\n") + +var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) + +if(LOESS == TRUE){ + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, n.years) + + rm(var365ClimLoess) + gc() + +} else { + var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + + rm(var365ClimDaily) + gc() +} + + +var365Anom <- var365 - var365Clim + +rm(var365Clim) +gc() + + +# load the daily WT classification for all years and grid points, with the same format as var365Anom: +load(paste0(workdir,"/txt/ERAint/metadata.RData")) # load lat.used and lon.used + +#WTs <- array(NA, c(n.years*365, n.lat, n.lon)) +WTs <- array(NA, c(n.years, 365, n.lat, n.lon)) + +# only for compatibility with older versions: +lat <- round(MSLP$lat,3) +lon <- round(MSLP$lon,3) + +for(latc in lat){ + #latc <- lat[20]; lonc <- lon[1] # for the debug (must stay commented out, or it overwrites the loop variable at each iteration) + + for(lonc in lon){ + pos.latc <- which(lat == latc) + pos.lonc <- which(lon == lonc) + + # Load the WT classification for that point: + WT.file <- paste0(workdir,"/txt/ERAint/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt") + + if(file.exists(WT.file)){ + WT <- read.table(WT.file,header=TRUE, stringsAsFactors=FALSE) + + # remove leap days from the WT classification (the MonthDay column must be created before looking for the 29th of February): + WT$MonthDay <- paste0(WT$Month,WT$Day) # create a new column + bis <- which(WT$MonthDay == "229") + WT365 <- WT[-bis,] + + rm(WT) + + # insert the WT classification with 10 WTs: + #WTs[, pos.latc, pos.lonc] <- WT$WT10.name + for(y in year.start:year.end){ + WTs[y-year.start+1,, pos.latc, pos.lonc] <- WT365$WT10.num[365*(y-year.start)+1:365] + } + + rm(WT365) + } + } +} + +n.WTs <- length(WTs.type10) # number of weather types in the 10-type classification + +# Impact of a WT on var (average of var only during the days belonging to a particular WT): + +for(p in periods){ + + # select var data only inside period p: + varPeriod <- var365Anom[,,,pos.period(1,p),,,drop=FALSE] + + # select WT data only inside period p: + WTsPeriod <- WTs[,pos.period(1,p),,] + + for(wt in 1:n.WTs){ + + # mean anomaly of var over the days assigned to WT 'wt' (this assumes var and the WT classification share the same grid): + varWT <- varPeriod[1,1,,,,] + varWT[WTsPeriod != wt] <- NA + impact <- apply(varWT, c(3,4), mean, na.rm=TRUE) + + # visualize the impact map: + png(filename=paste0(workdir,"/",var.name.file,"_Impact_of_WT_",WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700) + PlotEquiMap(impact, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + dev.off() + + } +}
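+ +# NOTE: the masking above computes a conditional mean; a toy example of the same operation: +demo.anom <- c(1.2, -0.4, 0.8, 2.0, -1.0) # five daily anomalies +demo.wt <- c(6, 6, 2, 6, 9) # WT assigned to each day +mean(demo.anom[demo.wt == 6]) # impact of WT 6: (1.2-0.4+2.0)/3 = 0.933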
+ + + + +# Load just 1 day of var data to detect the number of latitude and longitude points: +#var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var.lat <- round(var$lat,3) +var.lon <- round(var$lon,3) +var.n.lat <- length(var.lat) # number of latitude values of var +var.n.lon <- length(var.lon) +var.lon.pos <- ifelse(min(var.lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not. + +### Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications +### we must exclude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +###MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) +#MSLP <- Load(var = 'psl', exp = NULL, obs = list(psl.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +#psl.n.lat <- length(MSLP$lat) # number of latitude values of MSLP +#psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP +#psl.lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not. + +#psl.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the MSLP reanalysis) +#psl.n.lat.unused.equat <- 20 # number of unused latitude values on each side of the equator (must exclude 10 degrees N/S per side, so it depends on the spatial resolution of the reanalysis) +#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note that the eventual point at lat=0 is always included +#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note that the eventual point at lat=0 is always excluded +#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north +#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1) + +# final latitude values used as central points: +#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)] +#psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points +#psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points + +#psl.n.lat.used <- length(psl.lat.used) +#psl.n.lon.used <- length(psl.lon.used) +#psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used + +#if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative longitudes of MSLP to the [0, +360] range +#if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the positive longitudes of MSLP > 180 to the [-180, +180] range
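+ +# NOTE: the two commented lines above switch longitude conventions; e.g. from [0,360) to (-180,180]: +demo.lon <- c(0, 90, 179, 181, 359) +ifelse(demo.lon > 180, demo.lon - 360, demo.lon) # gives 0 90 179 -179 -1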
+ +# exclude var points > ~+80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +var.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the var reanalysis) +var.n.lat.unused.equat <- 17 # number of unused latitude values on each side of the equator (must exclude 10 degrees N/S per side, so it depends on the spatial resolution of the reanalysis) + +var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note that the eventual point at lat=0 is always included +var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note that the eventual point at lat=0 is always excluded +var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north +var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1) +var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south) + +#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points + +var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)] +var.lon.used <- var.lon # longitude values used as central points +var.n.lat.used <- length(var.lat.used) +var.n.lon.used <- length(var.lon.used) +var.n.grid.points <- var.n.lat.used * var.n.lon.used +#var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map +var.lon.used.bis <- c(var.lon.used[c(ceiling(var.n.lon.used/2):var.n.lon.used)]-360,var.lon.used[c(1:(ceiling(var.n.lon.used/2)-1))]) # to put Europe in the middle of the map + +#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used) +pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used) +#var.lat[pos.var.lat.unused] + + +### Map of the central points for Europe: +#void <- array(NA,c(var.n.lat.used, var.n.lon.used)) +###void.bis <- void[,c(257:512,1:256)] # when using the var dataset with res 256x512 +#void.bis <- void[,c(ceiling(var.n.lon.used/2):var.n.lon.used,1:(ceiling(var.n.lon.used/2)-1))] +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +###PlotEquiMap(void.bis[1:215,], var.lon.used.bis, var.lat[1:215], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#par(oma=c(1,1,1,1)) +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#PlotEquiMap(void.bis[25:90, 220:317], var.lon.used.bis[220:317], var.lat[25:90], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#my.parcelas<-data.frame(name="", lat=rep(var.lat.used,var.n.lon.used), long=rep(var.lon.used.bis, each=var.n.lat.used), pop=0, capital=0, stringsAsFactors=F) +#map.cities(my.parcelas, pch=3, cex=.5,col=c("gray40")) # add the 4 points of the 4 parcels +#write.table(cbind(lat=rep(var.lat.used,var.n.lon.used), lon=rep(var.lon.used, each=var.n.lat.used)),file=paste0(workdir,"/list_lat_lon.txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
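+ +# NOTE: the matching loop below relies on nearest() from Rfunctions.R (not shown here), and on the psl.lat.used/psl.lon.used vectors from the commented MSLP block above; +# a minimal sketch of what such a helper could look like, assuming it returns the indices c(lat position, lon position) of the closest central point +# (the real function may also handle the 0/360 longitude wrap, which this sketch does not): +nearest.sketch <- function(lat0, lon0, lats, lons) c(which.min(abs(lats - lat0)), which.min(abs(lons - lon0)))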
+# for each var grid point used, find the lat/lon position of the nearby central point of the WT classification: +closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point + +i<-0;vlat.pos <- 0 +for (vlat in var.lat.used) { + vlat.pos<-vlat.pos+1 + + vlon.pos<-0 + for(vlon in var.lon.used) { + i<-i+1 + cat(paste0("Point #",i,"/", var.n.grid.points), '\r') + vlon.pos<-vlon.pos+1 + + closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used) + closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]] + closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]] + + #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug + + } +} + +save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl_",var.name,".RData")) # save it if it is the first time +load(file=paste0(workdir,"/closer_psl_",var.name,".RData")) # load it if already saved + +#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug +#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug + +# for each var grid point used, load 1 year of daily var and wt data at a time, and assign it to the closer mslp central point: +# (you can't open all the data at once because it would take too much memory, but you can open 1 year at a time) +chunk <- split.array(dimensions=c(n.days.in.a.yearly.period(year.start,year.end), var.n.lat.used, var.n.lon.used), along=3) +save(chunk, file=paste0(workdir,"/chunk_",var.name,".RData")) # save the chunk to retrieve it later if you run the next loop for 1 year only ('year.end' changes, so 'chunk' changes too) +load(paste0(workdir,"/chunk_",var.name,".RData"))
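+ +# NOTE: split.array() is defined in Rfunctions.R (not shown here); judging from how 'chunk' is used below +# (chunk$n.chunk, chunk$int, chunk$chunk.size, chunk$chunk.size.last), a compatible sketch could be: +split.array.sketch <- function(dimensions, along=3, n.chunk=10) { + size <- ceiling(dimensions[along]/n.chunk) # nominal chunk width along the chosen dimension + int <- split(1:dimensions[along], ceiling((1:dimensions[along])/size)) # index interval of each chunk + list(n.chunk=length(int), int=int, chunk.size=size, chunk.size.last=length(int[[length(int)]])) +}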
+i=0 +for(y in year.start:year.end){ + #y<-1981 # for the debug + i<-i+1 + print(paste0("Year #",i,"/", n.years)) # n.years, because year.tot is not defined in this script + + n.days <- n.days.in.a.year(y) + if(exists("partial.end") && partial.end == TRUE && y == year.end) n.days <- n.days.last # the last year can have a lower number of days (partial.end and n.days.last must then be set beforehand) + + var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only + #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE) + + # open the ff binary files with all the WT classifications for that year (one file per group of central points): + for(f in 1:10) { + WT.ff <- paste0(workdir,"/",y,"_RData/WTs",f,"_",y) + if(file.exists(paste0(WT.ff,".RData"))) ffload(file=WT.ff, overwrite=TRUE) + } + + var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points + + j<-0 + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug + j<-j+1 + vlat.pos <- vlat.pos+1 + cat('Latitude ',j,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 + for(vlon in var.lon.used) { + vlon.pos <- vlon.pos+1 + + var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)] + + #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL) + + # load the WT classifications (variable WTs) for that year created with WT_vX.R: + #load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData")) + open(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + #open(eval(parse(text=gsub("-","m",paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))) + + # interval of days belonging to the year y, but starting to count from the year year.begin: + #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 1:n.days.in.a.year(y) + WTs <- as.ram(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of the year y + close(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + rm(WTs) + } + + } + + #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug + + #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE) + #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE) + + #year=4000 + #ffload(file=paste0(workdir,"/WTs_",year)) + #latc <- 70 + #lonc <- 0 + #open(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must use eval(parse(text=...)) + #close(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must use eval(parse(text=...)) + + # instead of saving only 1 file for the whole spatial domain, save 1 file for each chunk c (to be able to load them faster in the following step): + if(!dir.exists(paste0(workdir,"/",var.name,"_chunk"))) dir.create(paste0(workdir,"/",var.name,"_chunk")) + if(!dir.exists(paste0(workdir,"/",var.name,"_wt_chunk"))) dir.create(paste0(workdir,"/",var.name,"_wt_chunk")) # must match the path used in the save() below + + for(c in 1:chunk$n.chunk){ + var.serie.year.chunk<-var.serie[,chunk$int[[c]],] # format: [lat, lon, day] + save(var.serie.year.chunk, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE) + + wt.serie.year.chunk <- wt.serie[,chunk$int[[c]],] + save(wt.serie.year.chunk, file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE) + #n.wt10<-apply(wt.serie.year.chunk,c(1,2),function(x)length(which(x==18))) # for the debug + #edit(n.wt10) # for the debug + } + +} # close for on y + +#rm(closer.psl.lat, closer.psl.lon, var)
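+ +# NOTE: the eval(parse(text=...)) calls above look up ff objects whose names are built as strings; base R's get() does the same lookup more directly +# (the original comments note that do.call() does not work with open()); e.g., assuming an ff object with this name exists in the workspace: +#demo.name <- "WTs_lat_70_lon_0" +#open(get(demo.name)); demo.vals <- as.ram(get(demo.name)); close(get(demo.name))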
+ +# compute the climatology of var for each period and its daily anomalies, for each chunk and period: +var.clim <- array(NA, c(n.periods, var.n.lat.used, var.n.lon.used)) + +n.days.tot <- n.days.in.a.yearly.period(year.start,year.end) +#n.days.tot <- 365 # for the debug + +days.period <- n.days.period <- list() +for(p in periods){ + days.period[[p]] <- NA + for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + + days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period[[p]] <- length(days.period[[p]]) +} + +for(c in 1:chunk$n.chunk){ + #c=1 # for the debug + cat(paste0("Computing chunk n. ", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + for(p in periods){ + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] # select only the days in the chosen period + + var.clim.chunk <- apply(var.serie.chunk.period,c(1,2),mean, na.rm=T) + + var.anom.chunk.period <- var.serie.chunk.period - InsertDim(var.clim.chunk, 3, n.days.period[[p]]) + var.clim[p,,chunk$int[[c]]] <- var.clim.chunk + + # save the var anomaly, one file for each chunk and period: + assign(paste0("var.anom.chunk.period",p), var.anom.chunk.period) + + # a plain save() does not work here because the variable name is given by a string; use do.call() instead: + do.call(save, list(paste0("var.anom.chunk.period",p), file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE)) + + rm(var.anom.chunk.period,var.serie.chunk.period) + do.call(rm, list(paste0("var.anom.chunk.period",p))) + } + + rm(var.serie.chunk) +} + +save(var.clim, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save var.clim +load(paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"))
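+ +# NOTE: save() also accepts the object name as a string through its 'list' argument, which avoids the do.call() detour used above; +# a self-contained illustration: +demo.obj <- 1:3 +demo.file <- tempfile(fileext=".RData") +save(list="demo.obj", file=demo.file) # same effect as do.call(save, list("demo.obj", file=demo.file))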
", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size.last)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load wt.serie.year.chunk + + wt.serie.year.chunk[which(wt.serie.year.chunk==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days.year <- seq.days.in.a.future.year(year.start,y) + + wt.serie.chunk[,,int.days.year] <- wt.serie.year.chunk #[,chunk$int[[c]],] + rm(wt.serie.year.chunk) + } + + for(p in periods){ + # load var.anom.chunk.period + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData")) + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] + wt.serie.chunk.period <- wt.serie.chunk[,,days.period[[p]]] + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #chunk$int[[c]]-1 + for(vlon in var.lon.used[chunk$int[[c]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.chunk.period[vlat.pos,vlon.pos,]==wt) + var.anom.chunk.period <- get(paste0("var.anom.chunk.period",p)) + + n.wt.chunk[p, wt, vlat.pos, vlon.pos] <- length(pos.wt) + var.wt.sum.chunk[p, wt, vlat.pos, vlon.pos] <- sum(var.serie.chunk.period[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.chunk[p, wt, vlat.pos, vlon.pos] <- mean(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.chunk[p, wt, vlat.pos, vlon.pos] <- sd(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + + rm(pos.wt, var.anom.chunk.period) + } + } + } + + n.wt[p,,,chunk$int[[c]]] <- n.wt.chunk[p,,,] + var.wt.sum[p,,,chunk$int[[c]]] <- var.wt.sum.chunk[p,,,] + var.wt.mean.anom[p,,,chunk$int[[c]]] <- var.wt.mean.anom.chunk[p,,,] + var.wt.sd.anom[p,,,chunk$int[[c]]] <- var.wt.sd.anom.chunk[p,,,] + } # close for on p +} + +save(n.wt, var.wt.sum, var.wt.mean.anom, var.wt.sd.anom.chunk, file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) +load(file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +#if(var.name=='prlr'){ # if it is not in m/s (as in ERA-Interim) but it is in kg/m2/s, you must multiply for 86400 to convert to mm/day +# var.clim <- var.clim * 86400 +# var.wt.sum <- var.wt.sum * 86400 
+var.clim.NA <- array(NA, c(n.periods, var.n.lat, var.n.lon.used)) +n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.periods, n.wts, var.n.lat, var.n.lon.used)) + +var.clim.NA[,pos.var.lat.used,] <- var.clim +n.wt.NA[,,pos.var.lat.used,] <- n.wt +var.wt.mean.NA[,,pos.var.lat.used,] <- var.wt.mean +var.wt.sum.NA[,,pos.var.lat.used,] <- var.wt.sum +var.wt.mean.anom.NA[,,pos.var.lat.used,] <- var.wt.mean.anom +var.wt.mean.contrib.NA[,,pos.var.lat.used,] <- var.wt.mean.contrib +var.wt.sd.anom.NA[,,pos.var.lat.used,] <- var.wt.sd.anom + +# remove the equatorial area from the visualization: +n.wt.NA[,,var.pos.lat.unused.eq,]<- var.wt.mean.NA[,,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,,var.pos.lat.unused.eq,] <- NA +var.wt.mean.anom.NA[,,var.pos.lat.unused.eq,] <- var.wt.sd.anom.NA[,,var.pos.lat.unused.eq,] <- NA + +# move Europe to the center of the maps: +p1 <- ceiling(var.n.lon.used/2)-1 +p2 <- ceiling(var.n.lon.used/2) +p3 <- var.n.lon.used + +var.clim.NA.bis <- var.clim.NA[,,c(p2:p3,1:p1)] +n.wt.NA.bis <- n.wt.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.NA.bis <- var.wt.mean.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,,c(p2:p3,1:p1)] +var.wt.sd.anom.NA.bis <- var.wt.sd.anom.NA[,,,c(p2:p3,1:p1)] +gc()
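+ +# NOTE: the '.bis' arrays above only rotate the longitude index so that Europe sits in the middle of the map; e.g. with 8 longitudes: +demo.n <- 8 +c(ceiling(demo.n/2):demo.n, 1:(ceiling(demo.n/2)-1)) # gives 4 5 6 7 8 1 2 3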
colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors + +my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + + +# Create and save maps: + +index=1 +#p=17 # for the debug + +for(p in periods){ + png(filename=paste0(mapdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude in format -180,180 to displey europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + PlotEquiMap(var.clim.NA.bis[p,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + +index=2 +#p=17 # for the debug +wt=6 + +for(p in periods){ + png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", 
+ +index=2 +#p=17 # for the debug +wt=6 + +for(p in periods){ + png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", WTs.type10[wt],period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + PlotEquiMap(var.wt.mean.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +index=3 +p=17 # for the debug +wt=6 + +png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#n.wt.NA[wt,50:55,490:512] +#ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) +#n.wt10<-apply(wt.serie,c(1,2),function(x)length(which(x==18))) # for the debug +#edit(n.wt.NA[10,,]) # for the debug +#layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +PlotEquiMap(n.wt.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) + +dev.off() + + +index=4 +p=17 +wt=6 # for the debug + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(var.wt.mean.contrib.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], + drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt] , sizetit=1) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +# Average anomalies associated to a WT: +# NOTE: temperature anomalies need no Kelvin offset (the 273.15 cancels out when subtracting the climatology); +# for precipitation, multiply by 86400 to convert to mm/day +index=5 +#p=17 # for the debug +#wt=6 # for the debug +slide=FALSE # set TRUE to print maps for a slide, with bigger titles and legends; FALSE otherwise +europe=TRUE # set TRUE to show only a zoom over Europe; FALSE otherwise +map.only=FALSE # set TRUE to print only the image, with no legend or title (for posters) + +domain <- ifelse(europe, "European", "World") + +for(p in periods){ + for(wt in 1:n.wts){ + if(!slide) {n.box <- 10; col.cex=1; my.title <- paste(WTs.type10.name[wt],period.name[p])} else {n.box <- 5; col.cex=3; my.title <- ""} + if(map.only) my.title <- "" + + png(filename=paste0(mapdir,"/",var.name.file,"_Mean_",domain,"_Anomalies_of_WT_", WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + if(!europe)
PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide && map.only) title(paste0(WTs.type10.name[wt],period.name[p]),cex.main=3.5,outer=T) + + dev.off() + } + +} + + + + +index=6 +wt=6 # for the debug +slide=FALSE # set TRUE to print maps for a slide, with bigger titles and legends; FALSE otherwise +europe=FALSE # set TRUE to show only a zoom over Europe; FALSE otherwise + +if(!slide) {n.box <- 10; col.cex=1; my.title <- WTs.type10.name[wt]} else {n.box <- 5; col.cex=3; my.title <- ""} +domain <- ifelse(europe, "European", "World") + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/StDev_",domain,"_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + + if(!europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T) + + dev.off() +} + +#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") + +save.image(file=paste0(workdir,"/Weather_types",var.name,"_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE)
an index to plot. 1: temperature, 2: precipitation, 3: wind speed + +########################################################################################## + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +#year.tot <- year.end - year.start + 1 +n.periods <- length(periods) + + +# load 1 year of WT data to get the info on year.start and year,end +WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) + +year.start <- min(WTs_file1$Year) +year.end <- max(WTs_file1$Year) +n.years <- year.end - year.start + 1 + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=1) + +var.lat <- var$lat +var.lon <- var$lon + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + + +# Create and save climatology maps: + +#p=17 # for the debug + +# Map intervals and colors: +my.brks <- list() +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology +my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology + +my.cols <- list() +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed + +for(p in periods){ + png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude in format -180,180 to displey europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + # Select only days of the chosen month/season: + varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE] + varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE) + + if(index == 1) mod.index <- -273.5 # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + PlotEquiMap(varPeriodMean[1,1,,]+mod.index, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + + +# convert var in daily anomalies: +cat("Calculating anomalies. 
Please wait...\n") + +var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) + +if(LOESS == TRUE){ + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, n.years) + + rm(var365ClimLoess) + gc() + +} else { + var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + + rm(var365ClimDaily) + gc() +} + + +var365anom <- var365 - var365Clim + +rm(var365Clim) +gc() + + + + + + + + +# load daily WT classification for all years and grid points: +load(paste0(workdir,"/txt/ERAint/metadata.RData")) # load lat.used and lon.used + +for(latc in lat.used){ + #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug + + for(lonc in lon.used){ + #pos.latc <- which(lat.used==latc) + #pos.lonc <- which(lon.used==lonc) + + WTs <- read.table(paste0(workdir,"/txt/ERAint/all_years/WTs_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),header=TRUE, stringsAsFactors=FALSE) # load lat.used and lon.used + + # remove bisestile days from WT classification: + psleuFull[,,,,,] <- psleuFull366[,,,1:365,,] + + + + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(psleuFull366) + gc() + + + + + + + + + + + + + + + + + + + + +# Load just 1 day of var data to detect the number of latitude and longitude points; +#var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') +var.lat <- round(var$lat,3) +var.lon <- round(var$lon,3) +var.n.lat <- length(var.lat) # number of latitude values of var +var.n.lon <- length(var.lon) +var.lon.pos <- ifelse(min(var.lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +### Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications +### we must exlude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +###MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) +#MSLP <- Load(var = 'psl', exp = NULL, obs = list(psl.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +#psl.n.lat <- length(MSLP$lat) # number of latitude values or MSLP +#psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP +#psl.lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +#psl.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the MSLP reanalysis) +#psl.n.lat.unused.equat <- 20 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. 
of the reanalysis) +#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note than the eventual point at lat=0 is always included +#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note than the eventual point at lat=0 is always excluded +#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north +#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1) + +# final latitude values used as central points: +#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)] +#psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points +#psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points + +#psl.n.lat.used <- length(psl.lat.used) +#psl.n.lon.used <- length(psl.lon.used) +#psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used + +#if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative long of MSLP to the [0, +360] range +#if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the positive long of MSLP > 180 to the [-180, +180] range + +# exlude var points > +~80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +var.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the var reanalysis) +var.n.lat.unused.equat <- 17 # numb. of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. 
of the reanalysis) + +var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note than the eventual point at lat=0 is always included +var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note than the eventual point at lat=0 is always excluded +var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north +var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1) +var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south) + +#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points + +var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)] +var.lon.used <- var.lon # longitude values used as central points +var.n.lat.used <- length(var.lat.used) +var.n.lon.used <- length(var.lon.used) +var.n.grid.points <- var.n.lat.used * var.n.lon.used +#var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map +var.lon.used.bis <- c(var.lon.used[c(ceiling(var.n.lon.used/2):var.n.lon.used)]-360,var.lon.used[c(1:(ceiling(var.n.lon.used/2)-1))]) # to put Europe in the middle of the map + +#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used) +pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used) +#var.lat[pos.var.lat.unused] + + +### Map of the central points for Europe: +#void <- array(NA,c(var.n.lat.used, var.n.lon.used)) +###void.bis <- void[,c(257:512,1:256)] # when using the var dataset with res 256x512 +#void.bis <- void[,c(ceiling(var.n.lon.used/2):var.n.lon.used,1:(ceiling(var.n.lon.used/2)-1))] +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +###PlotEquiMap(void.bis[1:215,], var.lon.used.bis, var.lat[1:215], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#par(oma=c(1,1,1,1)) +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#PlotEquiMap(void.bis[25:90, 220:317], var.lon.used.bis[220:317], var.lat[25:90], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#my.parcelas<-data.frame(name="", lat=rep(var.lat.used,var.n.lon.used), long=rep(var.lon.used.bis, each=var.n.lat.used), pop=0, capital=0, stringsAsFactors=F) +#map.cities(my.parcelas, pch=3, cex=.5,col=c("gray40")) # add the 4 points of the 4 parcelas +#write.table(cbind(lat=rep(var.lat.used,var.n.lon.used), lon=rep(var.lon.used, each=var.n.lat.used)),file=paste0(workdir,"/list_lat_lon.txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + +# for each var grid point used, find the lat/lon position of the nearby point center of the WT classification: +closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point + +i<-0;vlat.pos <- 0 +for (vlat in var.lat.used) { + vlat.pos<-vlat.pos+1 + + vlon.pos<-0 + for(vlon in var.lon.used) { + i<-i+1 + cat(paste0("Point #",i,"/", 
var.n.grid.points), '\r') + vlon.pos<-vlon.pos+1 + + closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used) + closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]] + closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]] + + #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug + + } +} + +save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl_",var.name,".RData")) # save it if it is the first time +load(file=paste0(workdir,"/closer_psl_",var.name,".RData")) # load it if already saved + +#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug +#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug + +# for each var grid point used, load 1 year of daily var and wt data at time, and assign it to the closer mslp central point: +# (you can't open all data at once because it weights too much, but you can open 1 year at time) +chunk <- split.array(dimensions=c(n.days.in.a.yearly.period(year.start,year.end), var.n.lat.used, var.n.lon.used), along=3) +save(chunk, file=paste0(workdir,"/chunk_",var.name,".RData")) # save the chunk to retrieve it later or you run the next loop for 1 year only ('year.end' changes, so 'chunk' changes too) +load(paste0(workdir,"/chunk_",var.name,".RData")) + +i=0 +for(y in year.start:year.end){ + #y<-1981 # for the debug + i<-i+1 + print(paste0("Year #",i,"/", year.tot)) + + n.days <- n.days.in.a.year(y) + if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days + + var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only + #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE) + + # open the ff binary files with all the WT classification for that year: + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs1_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs1_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs2_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs2_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs3_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs3_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs4_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs4_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs5_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs5_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs6_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs6_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs7_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs7_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs8_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs8_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs9_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs9_",y)), 
+i=0
+for(y in year.start:year.end){
+  #y<-1981 # for the debug
+  i<-i+1
+  print(paste0("Year #",i,"/", year.tot))
+
+  n.days <- n.days.in.a.year(y)
+  if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days
+
+  var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only
+  #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE)
+
+  # open the ff binary files with all the WT classifications for that year:
+  for(k in 1:10){
+    WTs.file <- paste0(workdir,"/",y,"_RData/WTs",k,"_",y)
+    if(file.exists(paste0(WTs.file,".RData"))) ffload(file=WTs.file, overwrite=TRUE)
+  }
+
+  var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points
+
+  j<-0
+  vlat.pos <- 0
+  for (vlat in var.lat.used) {
+    #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug
+    j<-j+1
+    vlat.pos <- vlat.pos+1
+    cat('Latitude ',j,'/',var.n.lat.used,'\r')
+
+    vlon.pos <- 0
+    for(vlon in var.lon.used) {
+      vlon.pos <- vlon.pos+1
+
+      var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)]
+
+      #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL)
+
+      # load the WT classifications (variable WTs) for that year created with WT_vX.R:
+      #load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData"))
+      open(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) # do.call doesn't work with open, must use eval(parse(text=...))
+
+      # interval of days belonging to the year y, but starting to count from the year year.begin:
+      #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 1:n.days.in.a.year(y)
+      WTs <- as.ram(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))
+      wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of the year y
+      close(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))
+      rm(WTs)
+    }
+  }
+
+  #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug
+
+  #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE)
+  #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE)
+
+  #year=4000
+  #ffload(file=paste0(workdir,"/WTs_",year))
+  #latc <- 70
+  #lonc <- 0
+  #open(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must use eval(parse(text=...))
+  #close(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc))))
+
+  # instead of saving only 1 file for the whole spatial domain, save 1 file for each chunk c (to be able to load them faster in the following step):
+  if(!dir.exists(paste0(workdir,"/",var.name,"_chunk"))) dir.create(paste0(workdir,"/",var.name,"_chunk"))
+  if(!dir.exists(paste0(workdir,"/",var.name,"_wt_chunk"))) dir.create(paste0(workdir,"/",var.name,"_wt_chunk")) # this directory must match the wt save path below
+
+  for(c in 1:chunk$n.chunk){
+    var.serie.year.chunk<-var.serie[,chunk$int[[c]],] # format: [lat, lon, day]
+    save(var.serie.year.chunk, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE)
+
+    wt.serie.year.chunk <- wt.serie[,chunk$int[[c]],]
+    save(wt.serie.year.chunk, file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE)
+    # n.wt10<-apply(wt.serie.year.chunk,c(1,2),function(x)length(which(x==18))) # for the debug
+    # edit(n.wt10) # for the debug
+  }
+
+} # close for on y
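+
+# Note on the eval(parse(text=...)) calls above: when the goal is only to fetch an object whose
+# name is stored in a string, base R's get() is equivalent and easier to read, e.g.:
+#
+# wt.name <- paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos], "_lon_", closer.psl.lon[vlat.pos,vlon.pos])
+# open(get(wt.name)); WTs <- as.ram(get(wt.name)); close(get(wt.name))
+#
+# (left as a comment only, so the loop above stays exactly as run)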
+#rm(closer.psl.lat, closer.psl.lon, var)
+
+# compute the climatology of var for each period and its daily anomalies for each chunk and period:
+var.clim <- array(NA, c(n.periods, var.n.lat.used, var.n.lon.used))
+
+n.days.tot <- n.days.in.a.yearly.period(year.start,year.end)
+#n.days.tot <- 365 # for the debug
+
+days.period <- n.days.period <- list()
+for(p in periods){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning, introduced only to be able to execute the above command
+  n.days.period[[p]] <- length(days.period[[p]])
+}
+
+for(c in 1:chunk$n.chunk){
+  #c=1 # for the debug
+  cat(paste0("Computing chunk n. ", c,"/", chunk$n.chunk),'\r')
+
+  if(c == chunk$n.chunk) {
+    var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot))
+  } else {
+    var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot))
+  }
+
+  for(y in year.start:year.end){
+    load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk
+    int.days.year <- seq.days.in.a.future.year(year.start,y)
+
+    var.serie.chunk[,,int.days.year] <- var.serie.year.chunk
+    rm(var.serie.year.chunk)
+  }
+
+  for(p in periods){
+    var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] # select only the days in the chosen period
+
+    var.clim.chunk <- apply(var.serie.chunk.period,c(1,2),mean, na.rm=T)
+
+    var.anom.chunk.period <- var.serie.chunk.period - InsertDim(var.clim.chunk, 3, n.days.period[[p]])
+    var.clim[p,,chunk$int[[c]]] <- var.clim.chunk
+
+    # save the var anomaly, one file for each chunk and period:
+    assign(paste0("var.anom.chunk.period",p), var.anom.chunk.period)
+
+    # a plain save() call doesn't work in this case, use do.call below:
+    do.call(save, list(paste0("var.anom.chunk.period",p), file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE)) # this syntax is needed to save an RData object whose name is given by a string
+
+    rm(var.anom.chunk.period,var.serie.chunk.period)
+    do.call(rm, list(paste0("var.anom.chunk.period",p)))
+  }
+
+  rm(var.serie.chunk)
+}
+
+save(var.clim, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save var.clim
+load(paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"))
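+
+# Note on the do.call(save, ...) calls above: base R's save() can also take the object name as a
+# string through its 'list' argument, which avoids the do.call() detour, e.g.:
+#
+# save(list=paste0("var.anom.chunk.period",p),
+#      file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"),
+#      compress=FALSE)
+# rm(list=paste0("var.anom.chunk.period",p))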
", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size.last)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load wt.serie.year.chunk + + wt.serie.year.chunk[which(wt.serie.year.chunk==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days.year <- seq.days.in.a.future.year(year.start,y) + + wt.serie.chunk[,,int.days.year] <- wt.serie.year.chunk #[,chunk$int[[c]],] + rm(wt.serie.year.chunk) + } + + for(p in periods){ + # load var.anom.chunk.period + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData")) + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] + wt.serie.chunk.period <- wt.serie.chunk[,,days.period[[p]]] + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #chunk$int[[c]]-1 + for(vlon in var.lon.used[chunk$int[[c]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.chunk.period[vlat.pos,vlon.pos,]==wt) + var.anom.chunk.period <- get(paste0("var.anom.chunk.period",p)) + + n.wt.chunk[p, wt, vlat.pos, vlon.pos] <- length(pos.wt) + var.wt.sum.chunk[p, wt, vlat.pos, vlon.pos] <- sum(var.serie.chunk.period[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.chunk[p, wt, vlat.pos, vlon.pos] <- mean(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.chunk[p, wt, vlat.pos, vlon.pos] <- sd(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + + rm(pos.wt, var.anom.chunk.period) + } + } + } + + n.wt[p,,,chunk$int[[c]]] <- n.wt.chunk[p,,,] + var.wt.sum[p,,,chunk$int[[c]]] <- var.wt.sum.chunk[p,,,] + var.wt.mean.anom[p,,,chunk$int[[c]]] <- var.wt.mean.anom.chunk[p,,,] + var.wt.sd.anom[p,,,chunk$int[[c]]] <- var.wt.sd.anom.chunk[p,,,] + } # close for on p +} + +save(n.wt, var.wt.sum, var.wt.mean.anom, var.wt.sd.anom.chunk, file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) +load(file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +#if(var.name=='prlr'){ # if it is not in m/s (as in ERA-Interim) but it is in kg/m2/s, you must multiply for 86400 to convert to mm/day +# var.clim <- var.clim * 86400 +# var.wt.sum <- var.wt.sum * 86400 
+#if(var.name=='prlr'){ # if it is not in m/s (as in ERA-Interim) but in kg/m2/s, you must multiply by 86400 to convert to mm/day
+# var.clim <- var.clim * 86400
+# var.wt.sum <- var.wt.sum * 86400
+# var.wt.mean.anom <- var.wt.mean.anom * 86400
+# var.wt.sd.anom <- var.wt.sd.anom * 86400
+#}
+
+var.wt.mean.contrib <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used))
+
+var.wt.mean <- var.wt.sum/n.wt # average value of var associated to a WT
+#n.wt <- 100*n.wt/n.days.tot # convert to yearly frequency in %
+for(p in periods) n.wt[p,,,] <- 100*n.wt[p,,,]/n.days.period[[p]] # convert to yearly frequency in %
+for(p in periods) var.wt.mean.contrib[p,,,] <- 100*var.wt.sum[p,,,]/(InsertDim(var.clim[p,,],1,n.wts)*n.days.period[[p]]) # convert the sum to a % contribution
+
+var.clim.NA <- array(NA, c(n.periods, var.n.lat, var.n.lon.used))
+n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.periods, n.wts, var.n.lat, var.n.lon.used))
+
+var.clim.NA[,pos.var.lat.used,] <- var.clim
+n.wt.NA[,,pos.var.lat.used,] <- n.wt
+var.wt.mean.NA[,,pos.var.lat.used,] <- var.wt.mean
+var.wt.sum.NA[,,pos.var.lat.used,] <- var.wt.sum
+var.wt.mean.anom.NA[,,pos.var.lat.used,] <- var.wt.mean.anom
+var.wt.mean.contrib.NA[,,pos.var.lat.used,] <- var.wt.mean.contrib
+var.wt.sd.anom.NA[,,pos.var.lat.used,] <- var.wt.sd.anom
+
+# remove the equatorial area from visualization:
+n.wt.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.NA[,,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,,var.pos.lat.unused.eq,] <- NA
+var.wt.mean.anom.NA[,,var.pos.lat.unused.eq,] <- var.wt.sd.anom.NA[,,var.pos.lat.unused.eq,] <- NA
+
+# move Europe to the center of the maps:
+p1 <- ceiling(var.n.lon.used/2)-1
+p2 <- ceiling(var.n.lon.used/2)
+p3 <- var.n.lon.used
+
+var.clim.NA.bis <- var.clim.NA[,,c(p2:p3,1:p1)]
+n.wt.NA.bis <- n.wt.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.NA.bis <- var.wt.mean.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,,c(p2:p3,1:p1)]
+var.wt.sd.anom.NA.bis <- var.wt.sd.anom.NA[,,,c(p2:p3,1:p1)]
+gc()
+
+# Map intervals and colors (for my.brks[[1]] and my.brks[[5]], uncomment the set matching var.name; only the last uncommented assignment is used):
+my.brks <- list()
+
+#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology
+#my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+#my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+
+#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT
+my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT
+my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT
+my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var
+#my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT for wind speed
+#my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT for temperature
+my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation
+
+my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies of a WT
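+
+# The break sets above are hand-tuned per variable; an alternative (not used here) is to derive
+# them from the data itself, e.g. with quantiles, so that each colour bin holds a similar number
+# of grid points:
+#
+# my.brks.auto <- unique(quantile(var.wt.mean.anom, probs=seq(0,1,0.02), na.rm=TRUE))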
colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors + +my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + + +# Create and save maps: + +index=1 +#p=17 # for the debug + +for(p in periods){ + png(filename=paste0(mapdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude in format -180,180 to displey europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + PlotEquiMap(var.clim.NA.bis[p,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + +index=2 +#p=17 # for the debug +wt=6 + +for(p in periods){ + png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", 
+for(p in periods){
+  png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_",WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+  #PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  #PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  PlotEquiMap(var.wt.mean.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  #PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1)
+
+  dev.off()
+}
+
+index=3
+p=17 # for the debug
+wt=6
+
+png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700)
+
+layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F)
+#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F)
+#n.wt.NA[wt,50:55,490:512]
+#n.wt10<-apply(wt.serie,c(1,2),function(x)length(which(x==18))) # for the debug
+#edit(n.wt.NA[10,,]) # for the debug
+PlotEquiMap(n.wt.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1)
+
+dev.off()
+
+index=4
+p=17
+wt=6 # for the debug
+
+for(wt in 1:n.wts){
+  png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+  PlotEquiMap(var.wt.mean.contrib.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]],
+              drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt], sizetit=1)
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1)
+
+  dev.off()
+}
+
+# Average anomalies associated to a WT:
+# WARNING: for precipitation, multiply var.wt.mean.anom.NA.bis[p,wt,,] by 86400 to convert to mm/day;
+# temperature anomalies need no offset, since the Kelvin-to-Celsius shift cancels out in var - clim
+index=5
+#p=17 # for the debug
+#wt=6 # for the debug
+slide=FALSE # put TRUE if you want to print maps for a slide, with bigger titles and legends, FALSE otherwise
+europe=TRUE # put TRUE to show only a zoom over Europe, FALSE otherwise
+map.only=FALSE # put TRUE if you want to print only the image with no legend or title (for posters)
+
+domain <- ifelse(europe, "European", "World")
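+
+# The unit warning above in code form, assuming the ERA-Interim conventions mentioned earlier in
+# this script (tas in Kelvin, prlr in m/s); kept commented out, apply only when plotting:
+#
+# if(var.name == 'tas') var.wt.mean.NA.bis <- var.wt.mean.NA.bis - 273.15 # absolute means: K -> degC
+# if(var.name == 'prlr'){
+#   var.wt.mean.NA.bis <- var.wt.mean.NA.bis * 86400 # m/s -> mm/day
+#   var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA.bis * 86400 # the anomalies scale too
+# }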
+for(p in periods){
+  for(wt in 1:n.wts){
+    if(!slide) {n.box <- 10; col.cex=1; my.title <- paste(WTs.type10.name[wt],period.name[p])} else {n.box <- 5; col.cex=3; my.title <- ""}
+    if(map.only) my.title <- ""
+
+    png(filename=paste0(mapdir,"/",var.name.file,"_Mean_",domain,"_Anomalies_of_WT_", WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700)
+
+    layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE))
+    if(!europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]],
+                            cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+    if(europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]],
+                           cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+    ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex)
+    if(slide && map.only) title(paste0(WTs.type10.name[wt],period.name[p]),cex.main=3.5,outer=T)
+
+    dev.off()
+  }
+}
+
+index=6
+wt=6 # for the debug
+slide=FALSE # put TRUE if you want to print maps for a slide, with bigger titles and legends, FALSE otherwise
+europe=FALSE # put TRUE to show only a zoom over Europe, FALSE otherwise
+
+if(!slide) {n.box <- 10; col.cex=1; my.title <- WTs.type10.name[wt]} else {n.box <- 5; col.cex=3; my.title <- ""}
+domain <- ifelse(europe, "European", "World")
+
+for(wt in 1:n.wts){
+  png(filename=paste0(mapdir,"/StDev_",domain,"_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE))
+
+  if(!europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]],
+                          cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+  if(europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]],
+                         cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5)
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex)
+  if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T)
+
+  dev.off()
+}
+
+#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+
+save.image(file=paste0(workdir,"/Weather_types_",var.name,"_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE)
diff --git a/old/WT_drivers_v5.R b/old/WT_drivers_v5.R
new file mode 100644
index 0000000000000000000000000000000000000000..ea2d1ca7e13456635c3e1327adc73af9b7c4e628
--- /dev/null
+++ b/old/WT_drivers_v5.R
@@ -0,0 +1,864 @@
+##########################################################################################
+#            Relationship between WTs and a chosen climate variable                     #
+##########################################################################################
+
+library(s2dverification) # for the function Load()
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+# directory where to find the WT classification:
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT"
+
+# reanalysis used for daily var data (must be the same as the mslp data):
+var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+# any daily variable:
+var.name='tas' #'prlr' #'tas' #'sfcWind'
+var.name.file='Temperature' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the output map's filename
+
+periods=1:17 # identify monthly (from 1=Jan to 12=Dec), seasonal (from 13=winter to 16=autumn) and yearly=17 values
+
+LOESS <- TRUE # climatology filter on/off (if off, the raw daily climatology is computed instead and used to measure daily anomalies)
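+
+# pos.period() and period.name come from Rfunctions.R and are not shown here. From the way they
+# are used below, pos.period(y,p) presumably returns the day-of-year positions of period p
+# (1-12 = months, 13-16 = seasons, 17 = whole year) within year y. A hypothetical sketch for the
+# monthly case on a 365-day year, for reference only:
+#
+# pos.period.sketch <- function(p){ # p in 1:12, non-leap year
+#   len <- c(31,28,31,30,31,30,31,31,30,31,30,31) # month lengths
+#   first <- cumsum(c(1, len[-12])) # first day of the year of each month
+#   first[p] + 0:(len[p]-1)
+# }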
+index=1 # choose an index to plot. 1: temperature, 2: precipitation, 3: wind speed
+
+##########################################################################################
+n.periods <- length(periods)
+
+WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A")
+WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)",
+                   "Northerly (N)","Cyclonic (C)","Anticyclonic (A)")
+
+# load the WT metadata to get the info on year.start and year.end:
+#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years"))
+#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE)
+load(paste0(workdir,"/txt/ERAint/metadata.RData"))
+
+#year.start <- min(WTs_file1$Year)
+#year.end <- max(WTs_file1$Year)
+year.tot <- n.years <- year.end - year.start + 1 # number of years loaded (year.start and year.end come from the metadata)
+
+# load daily var data for all years:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=5)
+
+var.lat <- var$lat # var lat and lon MUST be the same as those of the WT classification, even if the latter can have NA for certain lat values
+var.lon <- var$lon
+
+n.lat.var <- length(var.lat)
+n.lon.var <- length(var.lon)
+
+# remove leap days (the 29th of February) from var, to have all arrays with the same dimensions:
+cat("Removing leap days. Please wait...\n")
+var365 <- var$obs[,,,1:365,,,drop=FALSE]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the March-December period, skipping the 29th of February (day 60)
+}
+
+rm(var)
+gc()
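+
+# Quick check of the leap-day bookkeeping above: the 29th of February is day 60 of a leap year,
+# so days 61:366 of a leap year line up with days 60:365 of the 365-day template, e.g.:
+#
+# format(as.Date(59:61, origin=as.Date("2012-01-01")-1), "%d %b") # "28 Feb" "29 Feb" "01 Mar"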
+# Create and save monthly/seasonal/yearly climatology maps:
+
+index <- 1 # 1: temperature 2: precipitation 3: wind speed
+#p=17 # for the debug
+
+# Map intervals and colors:
+my.brks <- list()
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+
+my.cols <- list()
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors for precipitation
+my.cols[[3]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[3]])-1) # same palette for wind speed
+
+mod.index <- 0
+if(index == 1) mod.index <- -273.15 # offset to convert temperature from Kelvin to degrees Celsius
+
+for(p in periods){
+  # select only the days of the chosen month/season:
+  varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE]
+  varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE)
+
+  n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the number of longitude values between 180 and 360 degrees longitude
+  lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+  var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+  png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+  #PlotEquiMap(varPeriodMean[1,1,,]+mod.index, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+  PlotEquiMap(varPeriodMean[1,1,,lon.swapped]+mod.index, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F)
+  ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
+# convert var to daily anomalies:
+cat("Calculating anomalies. Please wait...\n")
+
+var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T)
+
+if(LOESS == TRUE){
+  var365ClimLoess <- var365ClimDaily
+
+  for(i in 1:n.lat.var){
+    for(j in 1:n.lon.var){
+      my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      var365ClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(var365ClimDaily, my.data, my.loess)
+  gc()
+
+  var365Clim <- InsertDim(var365ClimLoess, 3, n.years) # n.years was defined above from the metadata
+
+  rm(var365ClimLoess)
+  gc()
+
+} else {
+  var365Clim <- InsertDim(var365ClimDaily, 3, n.years)
+
+  rm(var365ClimDaily)
+  gc()
+}
+
+var365Anom <- var365 - var365Clim
+
+rm(var365Clim)
+gc()
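+
+# A quick way to eyeball the effect of the LOESS filter for one grid point before trusting it
+# everywhere (illustration only; i and j are arbitrary in-range grid indices):
+#
+# i <- 50; j <- 100
+# raw <- apply(var365[1,1,,,i,j,drop=FALSE], 4, mean, na.rm=TRUE) # raw daily climatology at (i,j)
+# plot(1:365, raw, type="l", col="gray", xlab="day of year", ylab=var.name)
+# lines(1:365, predict(loess(raw ~ day, data.frame(raw=raw, day=1:365), span=0.35)), col="red")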
+# Impact of a WT on var (average of var only during the days belonging to a particular WT):
+
+# load lat and lon from the SLP grid:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# only for compatibility with older versions (it should be already loaded):
+lat <- round(MSLP$lat,3)
+lon <- round(MSLP$lon,3)
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData"))
+
+n.WTs <- 10 # length(unique(WTs))
+wt.codes <- unique(WTs[1,1,,]) # i.e. for 10 WTs, the codes are 1 2 3 4 5 6 7 8 9 18
+
+for(p in periods){
+
+  # select var data only inside period p (WTs has the format [year, day, lat, lon], so var is subset the same way):
+  varPeriod <- var365Anom[1,1,,pos.period(1,p),,]
+
+  # select WT data only inside period p:
+  WTsPeriod <- WTs[,pos.period(1,p),,]
+
+  for(wt in 1:n.WTs){
+
+    wt.code <- wt.codes[wt]
+
+    # remove from varPeriod the days not belonging to that wt (setting their value to NA):
+    WTsPeriod.wt <- WTsPeriod
+    ss <- which(WTsPeriod == wt.code)
+    pp <- which(WTsPeriod != wt.code)
+
+    WTsPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt
+    WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+
+    varPeriod.wt <- varPeriod * WTsPeriod.wt
+
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+
+    # visualize the impact map of the wt on var (the WT name goes in the filename, so maps of different WTs don't overwrite each other):
+    png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_",period.name[p],"_WT_",WTs.type10[wt],".png"),width=1000,height=700)
+
+    PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+
+    dev.off()
+  }
+}
+
+# Impact of a wind direction on var (average of var only during the days belonging to a particular wind direction):
+year.start <- 1985
+year.end <- 2014
+
+uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)
+vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)
+
+# wind direction in degrees clockwise from North, measured as the direction the wind is coming FROM:
+# atan2(u,v) gives the bearing the wind is blowing towards, and adding 180 degrees turns it into the coming-from direction
+windir <- ((180/pi)*atan2(uas$obs, vas$obs) + 180) %% 360
+rm(uas, vas)
+
+# classify each day into one of the 8 wind direction sectors (45 degrees each, centered on N, NE, ..., NW);
+# the classes are kept in a separate array, so the Load() output is not overwritten:
+windir.class <- array(NA_integer_, dim=dim(windir))
+sector.breaks <- seq(22.5, 337.5, by=45) # boundaries N/NE, NE/E, ..., W/NW, NW/N
+windir.class[windir >= 337.5 | windir < 22.5] <- 1 # N
+for(wd in 2:8) windir.class[windir >= sector.breaks[wd-1] & windir < sector.breaks[wd]] <- wd # 2=NE, 3=E, 4=SE, 5=S, 6=SW, 7=W, 8=NW
+
+for(p in periods){
+
+  # select var data only inside period p:
+  varPeriod <- var365Anom[1,1,,pos.period(1,p),,]
+
+  # select wind direction data only inside period p
+  # (note: windir.class still includes day 366; for a strict match the leap days should be removed as done for var365 above):
+  windirPeriod <- windir.class[1,1,,pos.period(1,p),,]
+
+  for(wd in 1:8){
+
+    # remove from varPeriod the days not belonging to that wind direction (setting their value to NA):
+    windirPeriod.wd <- windirPeriod
+    ss <- which(windirPeriod == wd)
+    pp <- which(windirPeriod != wd)
+
+    windirPeriod.wd[ss] <- 1 # set to 1 the days belonging to that direction
+    windirPeriod.wd[pp] <- NA # set to NA the days not belonging to that direction
+
+    varPeriod.wd <- varPeriod * windirPeriod.wd
+
+    var.mean.wd <- apply(varPeriod.wd, c(3,4), mean, na.rm=TRUE)
+
+    # visualize the impact map of the wind direction on var:
+    png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_",period.name[p],"_dir_",wd,".png"),width=1000,height=700)
+
+    PlotEquiMap(var.mean.wd, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+
+    dev.off()
+  }
+}
+
+# Load just 1 day of var data to detect the number of latitude and longitude points:
+#var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+var.lat <- round(var$lat,3)
+var.lon <- round(var$lon,3)
+var.n.lat <- length(var.lat) # number of latitude values of var
+var.n.lon <- length(var.lon)
+var.lon.pos <- ifelse(min(var.lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not
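+
+# Sanity check of the coming-from direction formula used above, on the four cardinal winds
+# (u is the eastward component, v the northward one; e.g. a pure southerly wind, u=0 v=1,
+# blows towards the north and must come FROM 180 degrees):
+#
+# u <- c(0, -1, 0, 1); v <- c(1, 0, -1, 0)
+# ((180/pi)*atan2(u, v) + 180) %% 360 # 180 90 0 270 -> from S, E, N, W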
+
+### Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications.
+### We must exclude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points:
+###MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+#MSLP <- Load(var = 'psl', exp = NULL, obs = list(psl.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+#psl.n.lat <- length(MSLP$lat) # number of latitude values of MSLP
+#psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP
+#psl.lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not
+
+#psl.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the MSLP reanalysis)
+#psl.n.lat.unused.equat <- 20 # number of unused latitude values on each side of the equator (must exclude 10 degrees N/S per side, so it depends on the spatial res. of the reanalysis)
+#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note that any point at lat=0 is always included
+#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note that any point at lat=0 is always excluded
+#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north
+#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1)
+
+# final latitude values used as central points:
+#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)]
+#psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points
+#psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points
+
+#psl.n.lat.used <- length(psl.lat.used)
+#psl.n.lon.used <- length(psl.lon.used)
+#psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used
+
+#if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative lon of MSLP to the [0, +360] range
+#if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the positive lon of MSLP > 180 to the [-180, +180] range
+
+# exclude var points > ~+80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points:
+var.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolution of the var reanalysis)
+var.n.lat.unused.equat <- 17 # number of unused latitude values on each side of the equator (must exclude 10 degrees N/S per side, so it depends on the spatial res. of the reanalysis)
+
+var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note that any point at lat=0 is always included
+var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note that any point at lat=0 is always excluded
+var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north
+var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1)
+var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)
+
+#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points
+
+var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)]
+var.lon.used <- var.lon # longitude values used as central points
+var.n.lat.used <- length(var.lat.used)
+var.n.lon.used <- length(var.lon.used)
+var.n.grid.points <- var.n.lat.used * var.n.lon.used
+#var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map
+var.lon.used.bis <- c(var.lon.used[c(ceiling(var.n.lon.used/2):var.n.lon.used)]-360,var.lon.used[c(1:(ceiling(var.n.lon.used/2)-1))]) # to put Europe in the middle of the map
+
+#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used)
+pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used)
+#var.lat[pos.var.lat.unused]
+
+### Map of the central points for Europe:
+#void <- array(NA,c(var.n.lat.used, var.n.lon.used))
+###void.bis <- void[,c(257:512,1:256)] # when using the var dataset with res 256x512
+#void.bis <- void[,c(ceiling(var.n.lon.used/2):var.n.lon.used,1:(ceiling(var.n.lon.used/2)-1))]
+###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug
+###PlotEquiMap(void.bis[1:215,], var.lon.used.bis, var.lat[1:215], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug
+#par(oma=c(1,1,1,1))
+#PlotEquiMap(void.bis[25:90, 220:317], var.lon.used.bis[220:317], var.lat[25:90], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug
+#my.parcelas<-data.frame(name="", lat=rep(var.lat.used,var.n.lon.used), long=rep(var.lon.used.bis, each=var.n.lat.used), pop=0, capital=0, stringsAsFactors=F)
+#map.cities(my.parcelas, pch=3, cex=.5,col=c("gray40")) # add the 4 points of the 4 parcelas
+#write.table(cbind(lat=rep(var.lat.used,var.n.lon.used), lon=rep(var.lon.used, each=var.n.lat.used)),file=paste0(workdir,"/list_lat_lon.txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+
+# for each var grid point used, find the lat/lon position of the nearby central point of the WT classification:
+closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point
+i<-0; vlat.pos <- 0
+for (vlat in var.lat.used) {
+  vlat.pos <- vlat.pos+1
+
+  vlon.pos <- 0
+  for(vlon in var.lon.used) {
+    i <- i+1
+    cat(paste0("Point #",i,"/",var.n.grid.points), '\r')
+    vlon.pos <- vlon.pos+1
+
+    closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used)
+    closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]]
+    closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]]
+
+    #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug
+  }
+}
+
+save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl_",var.name,".RData")) # save it the first time the script is run
+load(file=paste0(workdir,"/closer_psl_",var.name,".RData")) # load it if already saved
+
+#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug
+#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug
+
+# for each var grid point used, load 1 year of daily var and wt data at a time, and assign it to the closer mslp central point:
+# (you can't open all the data at once because it would not fit in memory, but you can open 1 year at a time)
+chunk <- split.array(dimensions=c(n.days.in.a.yearly.period(year.start,year.end), var.n.lat.used, var.n.lon.used), along=3)
+save(chunk, file=paste0(workdir,"/chunk_",var.name,".RData")) # save the chunk to retrieve it later, in case you rerun the next loop for 1 year only ('year.end' would change, so 'chunk' would change too)
+load(paste0(workdir,"/chunk_",var.name,".RData"))
+i=0
+for(y in year.start:year.end){
+  #y<-1981 # for the debug
+  i<-i+1
+  print(paste0("Year #",i,"/", year.tot))
+
+  n.days <- n.days.in.a.year(y)
+  if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days
+
+  var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only
+  #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE)
+
+  # open the ff binary files with all the WT classifications for that year:
+  for(k in 1:10){
+    WTs.file <- paste0(workdir,"/",y,"_RData/WTs",k,"_",y)
+    if(file.exists(paste0(WTs.file,".RData"))) ffload(file=WTs.file, overwrite=TRUE)
+  }
+
+  var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points
+
+  j<-0
+  vlat.pos <- 0
+  for (vlat in var.lat.used) {
+    #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug
+    j<-j+1
+    vlat.pos <- vlat.pos+1
+    cat('Latitude ',j,'/',var.n.lat.used,'\r')
+
+    vlon.pos <- 0
+    for(vlon in var.lon.used) {
+      vlon.pos <- vlon.pos+1
+
+      var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)]
+
+      #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL)
+
+      # load the WT classifications (variable WTs) for that year created with WT_vX.R:
+      #load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData"))
+      open(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) # do.call doesn't work with open, must use eval(parse(text=...))
+
+      # interval of days belonging to the year y, but starting to count from the year year.begin:
+      #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 1:n.days.in.a.year(y)
+      WTs <- as.ram(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))
+      wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of the year y
+      close(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))
+      rm(WTs)
+    }
+  }
+
+  #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug
+
+  #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE)
+  #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE)
+
+  #year=4000
+  #ffload(file=paste0(workdir,"/WTs_",year))
+  #latc <- 70
+  #lonc <- 0
+  #open(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must use eval(parse(text=...))
+  #close(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc))))
+
+  # instead of saving only 1 file for the whole spatial domain, save 1 file for each chunk c (to be able to load them faster in the following step):
+  if(!dir.exists(paste0(workdir,"/",var.name,"_chunk"))) dir.create(paste0(workdir,"/",var.name,"_chunk"))
+  if(!dir.exists(paste0(workdir,"/",var.name,"_wt_chunk"))) dir.create(paste0(workdir,"/",var.name,"_wt_chunk")) # this directory must match the wt save path below
+
+  for(c in 1:chunk$n.chunk){
+    var.serie.year.chunk<-var.serie[,chunk$int[[c]],] # format: [lat, lon, day]
+    save(var.serie.year.chunk, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE)
+
+    wt.serie.year.chunk <- wt.serie[,chunk$int[[c]],]
+    save(wt.serie.year.chunk, file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE)
+    # n.wt10<-apply(wt.serie.year.chunk,c(1,2),function(x)length(which(x==18))) # for the debug
+    # edit(n.wt10) # for the debug
+  }
+
+} # close for on y
+#rm(closer.psl.lat, closer.psl.lon, var)
+
+# compute the climatology of var for each period and its daily anomalies for each chunk and period:
+var.clim <- array(NA, c(n.periods, var.n.lat.used, var.n.lon.used))
+
+n.days.tot <- n.days.in.a.yearly.period(year.start,year.end)
+#n.days.tot <- 365 # for the debug
+
+days.period <- n.days.period <- list()
+for(p in periods){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning, introduced only to be able to execute the above command
+  n.days.period[[p]] <- length(days.period[[p]])
+}
+
+for(c in 1:chunk$n.chunk){
+  #c=1 # for the debug
+  cat(paste0("Computing chunk n. ", c,"/", chunk$n.chunk),'\r')
+
+  if(c == chunk$n.chunk) {
+    var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot))
+  } else {
+    var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot))
+  }
+
+  for(y in year.start:year.end){
+    load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk
+    int.days.year <- seq.days.in.a.future.year(year.start,y)
+
+    var.serie.chunk[,,int.days.year] <- var.serie.year.chunk
+    rm(var.serie.year.chunk)
+  }
+
+  for(p in periods){
+    var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] # select only the days in the chosen period
+
+    var.clim.chunk <- apply(var.serie.chunk.period,c(1,2),mean, na.rm=T)
+
+    var.anom.chunk.period <- var.serie.chunk.period - InsertDim(var.clim.chunk, 3, n.days.period[[p]])
+    var.clim[p,,chunk$int[[c]]] <- var.clim.chunk
+
+    # save the var anomaly, one file for each chunk and period:
+    assign(paste0("var.anom.chunk.period",p), var.anom.chunk.period)
+
+    # a plain save() call doesn't work in this case, use do.call below:
+    do.call(save, list(paste0("var.anom.chunk.period",p), file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE)) # this syntax is needed to save an RData object whose name is given by a string
+
+    rm(var.anom.chunk.period,var.serie.chunk.period)
+    do.call(rm, list(paste0("var.anom.chunk.period",p)))
+  }
+
+  rm(var.serie.chunk)
+}
+
+save(var.clim, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save var.clim
+load(paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"))
", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size.last)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load wt.serie.year.chunk + + wt.serie.year.chunk[which(wt.serie.year.chunk==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days.year <- seq.days.in.a.future.year(year.start,y) + + wt.serie.chunk[,,int.days.year] <- wt.serie.year.chunk #[,chunk$int[[c]],] + rm(wt.serie.year.chunk) + } + + for(p in periods){ + # load var.anom.chunk.period + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData")) + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] + wt.serie.chunk.period <- wt.serie.chunk[,,days.period[[p]]] + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #chunk$int[[c]]-1 + for(vlon in var.lon.used[chunk$int[[c]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.chunk.period[vlat.pos,vlon.pos,]==wt) + var.anom.chunk.period <- get(paste0("var.anom.chunk.period",p)) + + n.wt.chunk[p, wt, vlat.pos, vlon.pos] <- length(pos.wt) + var.wt.sum.chunk[p, wt, vlat.pos, vlon.pos] <- sum(var.serie.chunk.period[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.chunk[p, wt, vlat.pos, vlon.pos] <- mean(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.chunk[p, wt, vlat.pos, vlon.pos] <- sd(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + + rm(pos.wt, var.anom.chunk.period) + } + } + } + + n.wt[p,,,chunk$int[[c]]] <- n.wt.chunk[p,,,] + var.wt.sum[p,,,chunk$int[[c]]] <- var.wt.sum.chunk[p,,,] + var.wt.mean.anom[p,,,chunk$int[[c]]] <- var.wt.mean.anom.chunk[p,,,] + var.wt.sd.anom[p,,,chunk$int[[c]]] <- var.wt.sd.anom.chunk[p,,,] + } # close for on p +} + +save(n.wt, var.wt.sum, var.wt.mean.anom, var.wt.sd.anom.chunk, file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) +load(file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +#if(var.name=='prlr'){ # if it is not in m/s (as in ERA-Interim) but it is in kg/m2/s, you must multiply for 86400 to convert to mm/day +# var.clim <- var.clim * 86400 +# var.wt.sum <- var.wt.sum * 86400 
+#if(var.name=='prlr'){ # if it is not in m/s (as in ERA-Interim) but in kg/m2/s, you must multiply by 86400 to convert to mm/day
+# var.clim <- var.clim * 86400
+# var.wt.sum <- var.wt.sum * 86400
+# var.wt.mean.anom <- var.wt.mean.anom * 86400
+# var.wt.sd.anom <- var.wt.sd.anom * 86400
+#}
+
+var.wt.mean.contrib <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used))
+
+var.wt.mean <- var.wt.sum/n.wt # average value of var associated to a WT
+#n.wt <- 100*n.wt/n.days.tot # convert to yearly frequency in %
+for(p in periods) n.wt[p,,,] <- 100*n.wt[p,,,]/n.days.period[[p]] # convert to yearly frequency in %
+for(p in periods) var.wt.mean.contrib[p,,,] <- 100*var.wt.sum[p,,,]/(InsertDim(var.clim[p,,],1,n.wts)*n.days.period[[p]]) # convert the sum to a % contribution
+
+var.clim.NA <- array(NA, c(n.periods, var.n.lat, var.n.lon.used))
+n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.periods, n.wts, var.n.lat, var.n.lon.used))
+
+var.clim.NA[,pos.var.lat.used,] <- var.clim
+n.wt.NA[,,pos.var.lat.used,] <- n.wt
+var.wt.mean.NA[,,pos.var.lat.used,] <- var.wt.mean
+var.wt.sum.NA[,,pos.var.lat.used,] <- var.wt.sum
+var.wt.mean.anom.NA[,,pos.var.lat.used,] <- var.wt.mean.anom
+var.wt.mean.contrib.NA[,,pos.var.lat.used,] <- var.wt.mean.contrib
+var.wt.sd.anom.NA[,,pos.var.lat.used,] <- var.wt.sd.anom
+
+# remove the equatorial area from visualization:
+n.wt.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.NA[,,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,,var.pos.lat.unused.eq,] <- NA
+var.wt.mean.anom.NA[,,var.pos.lat.unused.eq,] <- var.wt.sd.anom.NA[,,var.pos.lat.unused.eq,] <- NA
+
+# move Europe to the center of the maps:
+p1 <- ceiling(var.n.lon.used/2)-1
+p2 <- ceiling(var.n.lon.used/2)
+p3 <- var.n.lon.used
+
+var.clim.NA.bis <- var.clim.NA[,,c(p2:p3,1:p1)]
+n.wt.NA.bis <- n.wt.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.NA.bis <- var.wt.mean.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,,c(p2:p3,1:p1)]
+var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,,c(p2:p3,1:p1)]
+var.wt.sd.anom.NA.bis <- var.wt.sd.anom.NA[,,,c(p2:p3,1:p1)]
+gc()
+
+# Map intervals and colors (for my.brks[[1]] and my.brks[[5]], uncomment the set matching var.name; only the last uncommented assignment is used):
+my.brks <- list()
+
+#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology
+#my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+#my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+
+#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT
+my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT
+my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT
+my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var
+#my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT for wind speed
+#my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT for temperature
+my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation
+
+my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies of a WT
colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors + +my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + + +# Create and save maps: + +index <- 1 +#p=17 # for the debug + +for(p in periods){ + png(filename=paste0(mapdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude in format -180,180 to displey europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + PlotEquiMap(var.clim.NA.bis[p,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + +index=2 +#p=17 # for the debug +wt=6 + +for(p in periods){ + 
png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", WTs.type10[wt],period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + PlotEquiMap(var.wt.mean.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +index=3 +p=17 # for the debug +wt=6 + +png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#n.wt.NA[wt,50:55,490:512] +#ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) +#n.wt10<-apply(wt.serie.,c(1,2),function(x)length(which(x==18))) # for the debug +#edit(n.wt.NA[10,,]) # for the debug +#layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +PlotEquiMap(n.wt.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) + +dev.off() + + +index=4 +p=17 +wt=6 # for the debug + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(var.wt.mean.contrib.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], + drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt] , sizetit=1) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +# Average anomalies associated to a WT: +# WARNING: for temerature, you must remove -273 after var.wt.mean.anom.NA.bis[p,wt,,] to convert to anomalies in degrees +# for precipitation, you must multiply it for 86400 +index=5 +#p=17 # for the debug +#wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=TRUE # put TRUE to show only a zoom over Europe; false otherwise +map.only=FALSE # put TRUE if you want to print only the image with no legend or title (for posters) + +domain <- ifelse(europe, "European", "World") + +for(p in periods){ + for(wt in 1:n.wts){ + if(!slide) {n.box <- 10; col.cex=1; my.title <- paste(WTs.type10.name[wt],period.name[p])} else {n.box <- 5; col.cex=3; my.title <- ""} + if(map.only) my.title <- "" + + png(filename=paste0(mapdir,"/",var.name.file,"_Mean_",domain,"_Anomalies_of_WT_", WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = 
TRUE)) + if(!europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide && map.only) title(paste0(WTs.type10.name[wt],period.name[p]),cex.main=3.5,outer=T) + + dev.off() + } + +} + + + + +index=6 +wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=FALSE # put TRUE to show only a zoom over Europe; false otherwise + +if(!slide) {n.box <- 10; col.cex=1; my.title <- WTs.type10.name[wt]} else {n.box <- 5; col.cex=3; my.title <- ""} +domain <- ifelse(europe, "European", "World") + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/StDev_",domain,"_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + + if(!europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T) + + dev.off() +} + +#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") + +save.image(file=paste0(workdir,"/Weather_types",var.name,"_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) diff --git a/old/WT_drivers_v5.R~ b/old/WT_drivers_v5.R~ new file mode 100644 index 0000000000000000000000000000000000000000..c1f8a55bc18b5b00bd38754786e49932d8500915 --- /dev/null +++ b/old/WT_drivers_v5.R~ @@ -0,0 +1,863 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +library(s2dverification) # for the function Load() + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +# directory where to find the WT classification +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT" + +# reanalysis used for daily var data (must be the same as the mslp data): +var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + + # any daily variable: +var.name='tas' #'prlr' #'tas' #'sfcWind' +var.name.file='Temperature' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's filename + +periods=1:17 # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values + +LOESS <- TRUE # climatology filter on/off (if off, daily climatology is computed instead and used to measure daily anomalies) 
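+# Illustrative aside (not part of the classification): a minimal sketch of what the
+# LOESS filter above does to a daily climatology, on synthetic data; the 0.35 span
+# mirrors the value used later in this script, everything else is a toy example:
+#   day <- 1:365
+#   raw.clim <- 15 + 10*sin(2*pi*(day-120)/365) + rnorm(365, sd=2) # noisy daily means
+#   smooth.clim <- predict(loess(raw.clim ~ day, span=0.35))       # smoothed annual cycle
+#   plot(day, raw.clim, type='l', col='gray'); lines(day, smooth.clim, col='red', lwd=2)
+# Daily anomalies are then measured against the smoothed cycle, which removes the
+# day-to-day sampling noise of the raw climatology.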
+ +index=1 # choose an index to plot. 1: temperature, 2: precipitation, 3: wind speed + +########################################################################################## + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") + +#year.tot <- year.end - year.start + 1 +n.periods <- length(periods) + +# load WT metadata to get the info on year.start and year.end +#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) +load(paste0(workdir,"/txt/ERAint/metadata.RData")) + +#year.start <- min(WTs_file1$Year) +#year.end <- max(WTs_file1$Year) + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=5) + +var.lat <- var$lat # var lat and lon MUST be the same of WT classification, even if the latter can have NA for certain lat values. +var.lon <- var$lon + +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) + + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + + +# Create and save monthly/seasonal/yearly climatology maps: + +index <- 1 # 1: temperature 2: precipitation 3: wind speed +#p=17 # for the debug + +# Map intervals and colors: +my.brks <- list() +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology +my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology + +my.cols <- list() +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed + +mod.index <- 0 +if(index == 1) mod.index <- -273.5 # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + +for(p in periods){ + # Select only days of the chosen month/season: + varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE] + varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE) + + n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the number of longitude values between 180 and 360 degrees longitud + lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged + var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + + png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + + #PlotEquiMap(varPeriodMean[1,1,,]+mod.index, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], 
cols=my.cols[[index]], drawleg=F, colNA="gray") + PlotEquiMap(varPeriodMean[1,1,,lon.swapped]+mod.index, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + +# convert var to daily anomalies: +cat("Calculating anomalies. Please wait...\n") + +var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + +if(LOESS == TRUE){ + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, n.years) + + rm(var365ClimLoess) + gc() + +} else { + var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + + rm(var365ClimDaily) + gc() +} + + +var365Anom <- var365 - var365Clim + +rm(var365Clim) +gc() + + + + + +# Impact of a WT on var (average of var only during the days belonging to a particular WT): + +# load lat and lon from SLP grid: +load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used + +# only for compatibility with older versions (it should be already loaded): +lat <- round(MSLP$lat,3) +lon <- round(MSLP$lon,3) + +# load daily WT classification for all years and grid points and with the same format of var365Anom: +load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load lat.used and lon.used + +n.WTs <- 10 # length(unique(WTs)) +wt.codes <- unique(WTs[1,1,,]) + +for(p in periods){ + + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select WT data only inside period p: + WTsPeriod <- WTs[,pos.period(1,p),,] + + #WTs.type + + for(wt in 1:n.WTs){ + + wt.code <- wt.codes[wt] # i.e: for 10 WTs, their codes are. 
1 2 3 4 5 6 7 8 9 18
+
+      # remove from varPeriod the days not belonging to that wt (setting them to NA):
+      WTsPeriod.wt <- WTsPeriod
+      ss <- which(WTsPeriod == wt.code)
+      pp <- which(WTsPeriod != wt.code) # (was WTPeriod, an undefined object)
+
+      WTsPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wt
+      WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+
+      varPeriod.wt <- varPeriod * WTsPeriod.wt
+
+      var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+
+      # visualize impact map of the wt on var (one file per WT and period, so files are not overwritten):
+      png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_WT_",WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700)
+
+      PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+
+      dev.off() # close the png device before drawing the next map
+   }
+}
+
+
+
+# Impact of a wind direction on var (average of var only during the days belonging to a particular wind direction):
+uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=5)$obs
+vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=5)$obs
+
+windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees: atan2(u,v) is already measured clockwise from north,
+                                        # and +180 shifts from the direction the wind blows to to the direction it comes from
+rm(uas, vas)
+
+# remove bisestile (leap) days from windir so its dimensions match those of var365Anom:
+windir365 <- windir[,,,1:365,,,drop=FALSE]
+for(y in year.start:year.end){
+   y2 <- y - year.start + 1
+   if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # take the march to december period removing the 29th of February
+}
+rm(windir)
+
+# classify each day into one of the 8 wind direction sectors of 45 degrees each, centered on N, NE, E, SE, S, SW, W and NW:
+windirClass <- array(1, dim(windir365))     # 1 = N (covers 337.5-360 and 0-22.5 degrees)
+windirClass[which(windir365 > 22.5)]  <- 2  # NE
+windirClass[which(windir365 > 67.5)]  <- 3  # E
+windirClass[which(windir365 > 112.5)] <- 4  # SE
+windirClass[which(windir365 > 157.5)] <- 5  # S
+windirClass[which(windir365 > 202.5)] <- 6  # SW
+windirClass[which(windir365 > 247.5)] <- 7  # W
+windirClass[which(windir365 > 292.5)] <- 8  # NW
+windirClass[which(windir365 > 337.5)] <- 1  # N again
+rm(windir365)
+
+wd.dir <- c("N","NE","E","SE","S","SW","W","NW") # sector names used in the output filenames
+
+for(p in periods){
+
+   # select var data only inside period p:
+   varPeriod <- var365Anom[1,1,,pos.period(1,p),,]
+
+   # select wind direction data only inside period p:
+   windirPeriod <- windirClass[1,1,,pos.period(1,p),,]
+
+   for(wd in 1:8){
+
+      # remove from varPeriod the days not belonging to that wd (setting them to NA):
+      windirPeriod.wd <- windirPeriod
+      ss <- which(windirPeriod == wd)
+      pp <- which(windirPeriod != wd)
+
+      windirPeriod.wd[ss] <- 1  # set to 1 the days belonging to that wd
+      windirPeriod.wd[pp] <- NA # set to NA the days not belonging to that wd
+
+      varPeriod.wd <- varPeriod * windirPeriod.wd
+
+      var.mean.wd <- apply(varPeriod.wd, c(3,4), mean, na.rm=TRUE)
+
+      # visualize impact map of the wd on var (one file per direction and period):
+      png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_direction_",wd.dir[wd],"_",period.name[p],".png"),width=1000,height=700)
+
+      PlotEquiMap(var.mean.wd, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray")
+
+      dev.off()
+   }
+}
+
+
+
+# Load just 1 day of var data to detect the number of latitude and longitude points;
+#var <- Load(var.name, NULL, var.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+var.lat <- round(var$lat,3)
+var.lon <- round(var$lon,3)
+var.n.lat <- length(var.lat) # number of latitude values of var
+var.n.lon <- length(var.lon)
+var.lon.pos <- ifelse(min(var.lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not.
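+# Illustrative aside: the two longitude conventions that var.lon.pos distinguishes,
+# shown on toy values (not the reanalysis grid); the commented block below converts
+# the MSLP central-point longitudes to whichever convention the var grid uses:
+#   lon360 <- c(0, 90, 180, 270, 350)                     # [0, 360) convention
+#   lon180 <- ifelse(lon360 > 180, lon360 - 360, lon360)  # -> 0 90 180 -90 -10, i.e. [-180, 180]
+#   back   <- ifelse(lon180 < 0, lon180 + 360, lon180)    # -> back to [0, 360)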
+ +### Load just 1 day of MSLP data to detect the number of latitude and longitude points of the WT classifications +### we must exlude points > +80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +###MSLP <- Load('psl', NULL, psl.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) +#MSLP <- Load(var = 'psl', exp = NULL, obs = list(psl.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +#psl.n.lat <- length(MSLP$lat) # number of latitude values or MSLP +#psl.n.lon <- length(MSLP$lon) # number of longitude values of MSLP +#psl.lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +#psl.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the MSLP reanalysis) +#psl.n.lat.unused.equat <- 20 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. of the reanalysis) +#psl.pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note than the eventual point at lat=0 is always included +#psl.pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note than the eventual point at lat=0 is always excluded +#psl.pos.lat.unused.eq.north <- (psl.pos.lat.eq.north - psl.n.lat.unused.poles + 1):psl.pos.lat.eq.north +#psl.pos.lat.unused.eq.south <- psl.pos.lat.eq.south:(psl.pos.lat.eq.south + psl.n.lat.unused.equat - 1) + +# final latitude values used as central points: +#psl.lat.used <- MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat, psl.pos.lat.unused.eq.north, psl.pos.lat.unused.eq.south)] +#psl.lat.used <- round(MSLP$lat[-c(1:psl.n.lat.unused.poles,(psl.n.lat-psl.n.lat.unused.poles):psl.n.lat)],3) # latitude values used as central points +#psl.lon.used <- round(MSLP$lon,3) # longitude values used as central points + +#psl.n.lat.used <- length(psl.lat.used) +#psl.n.lon.used <- length(psl.lon.used) +#psl.n.grid.points <- psl.n.lat.used * psl.n.lon.used + +#if(var.lon.pos && !psl.lon.pos) {ss <- which(psl.lon.used<0); psl.lon.used[ss] <- psl.lon.used[ss] + 360} # convert the negative long of MSLP to the [0, +360] range +#if(!var.lon.pos && psl.lon.pos) {ss <- which(psl.lon.used>180); psl.lon.used[ss] <- psl.lon.used[ss] - 360} # convert the positive long of MSLP > 180 to the [-180, +180] range + +# exlude var points > +~80 degrees lat and < -80 deg. because the Lamb grid was created excluding these points: +var.n.lat.unused.poles <- 17 # number of discarded latitude values near each of the two poles (it depends on the spatial resolutions of the var reanalysis) +var.n.lat.unused.equat <- 17 # numb. of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the spat.res. 
of the reanalysis) + +var.pos.lat.eq.north <- tail(which(var$lat >= 0),1) # note than the eventual point at lat=0 is always included +var.pos.lat.eq.south <- head(which(var$lat < 0),1) # note than the eventual point at lat=0 is always excluded +var.pos.lat.unused.eq.north <- (var.pos.lat.eq.north - var.n.lat.unused.equat+1):var.pos.lat.eq.north +var.pos.lat.unused.eq.south <- var.pos.lat.eq.south:(var.pos.lat.eq.south + var.n.lat.unused.equat - 1) +var.pos.lat.unused.eq <- c(var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south) + +#var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat, var.pos.lat.unused.eq.north, var.pos.lat.unused.eq.south)] # latitude values used as central points + +var.lat.used <- var.lat[-c(1:var.n.lat.unused.poles,(var.n.lat-var.n.lat.unused.poles):var.n.lat)] +var.lon.used <- var.lon # longitude values used as central points +var.n.lat.used <- length(var.lat.used) +var.n.lon.used <- length(var.lon.used) +var.n.grid.points <- var.n.lat.used * var.n.lon.used +#var.lon.used.bis <- c(var.lon.used[c(257:512)]-360,var.lon.used[c(1:256)]) # to put Europe in the middle of the map +var.lon.used.bis <- c(var.lon.used[c(ceiling(var.n.lon.used/2):var.n.lon.used)]-360,var.lon.used[c(1:(ceiling(var.n.lon.used/2)-1))]) # to put Europe in the middle of the map + +#pos.var.lat.unused <- which(is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values not used (those not in var.lat.used) +pos.var.lat.used <- match(var.lat.used, var.lat) # =which(!is.na( match(var.lat,var.lat.used))) # position inside var.lat of the latitude values used (those in var.lat.used) +#var.lat[pos.var.lat.unused] + + +### Map of the central points for Europe: +#void <- array(NA,c(var.n.lat.used, var.n.lon.used)) +###void.bis <- void[,c(257:512,1:256)] # when using the var dataset with res 256x512 +#void.bis <- void[,c(ceiling(var.n.lon.used/2):var.n.lon.used,1:(ceiling(var.n.lon.used/2)-1))] +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +###PlotEquiMap(void.bis[1:215,], var.lon.used.bis, var.lat[1:215], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#par(oma=c(1,1,1,1)) +###PlotEquiMap(void.bis, var.lon.used.bis, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#PlotEquiMap(void.bis[25:90, 220:317], var.lon.used.bis[220:317], var.lat[25:90], filled.continents = FALSE, brks=seq(0,360,1), intxlon=5, intylat=5, drawleg=F ) # for the debug +#my.parcelas<-data.frame(name="", lat=rep(var.lat.used,var.n.lon.used), long=rep(var.lon.used.bis, each=var.n.lat.used), pop=0, capital=0, stringsAsFactors=F) +#map.cities(my.parcelas, pch=3, cex=.5,col=c("gray40")) # add the 4 points of the 4 parcelas +#write.table(cbind(lat=rep(var.lat.used,var.n.lon.used), lon=rep(var.lon.used, each=var.n.lat.used)),file=paste0(workdir,"/list_lat_lon.txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + +# for each var grid point used, find the lat/lon position of the nearby point center of the WT classification: +closer.psl.lat <- closer.psl.lon <- array(NA, c(var.n.lat.used, var.n.lon.used)) # 2 matrices with the lat and lon position of the nearest MSLP grid point + +i<-0;vlat.pos <- 0 +for (vlat in var.lat.used) { + vlat.pos<-vlat.pos+1 + + vlon.pos<-0 + for(vlon in var.lon.used) { + i<-i+1 + cat(paste0("Point #",i,"/", 
var.n.grid.points), '\r') + vlon.pos<-vlon.pos+1 + + closer.psl.pos <- nearest(vlat, vlon, psl.lat.used, psl.lon.used) + closer.psl.lat[vlat.pos, vlon.pos] <- psl.lat.used[closer.psl.pos[1]] + closer.psl.lon[vlat.pos, vlon.pos] <- psl.lon.used[closer.psl.pos[2]] + + #print(paste0("vlat=",vlat, " vlat.pos=",vlat.pos, " vlon=",vlon, " vlon.pos=",vlon.pos, " closer.psl.lat.pos=", closer.psl.pos[1], " closer.psl.lon.pos=",closer.psl.pos[2], " closer.psl.lat=", psl.lat.used[closer.psl.pos[1]], " closer.psl.lon=", psl.lon.used[closer.psl.pos[2]] )) # for the debug + + } +} + +save(closer.psl.lat, closer.psl.lon, file=paste0(workdir,"/closer_psl_",var.name,".RData")) # save it if it is the first time +load(file=paste0(workdir,"/closer_psl_",var.name,".RData")) # load it if already saved + +#PlotEquiMap(closer.psl.lon, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(0,360,1) ) # for the debug +#PlotEquiMap(closer.psl.lat, var.lon.used, var.lat.used, filled.continents = FALSE, brks=seq(-90,90,1) ) # for the debug + +# for each var grid point used, load 1 year of daily var and wt data at time, and assign it to the closer mslp central point: +# (you can't open all data at once because it weights too much, but you can open 1 year at time) +chunk <- split.array(dimensions=c(n.days.in.a.yearly.period(year.start,year.end), var.n.lat.used, var.n.lon.used), along=3) +save(chunk, file=paste0(workdir,"/chunk_",var.name,".RData")) # save the chunk to retrieve it later or you run the next loop for 1 year only ('year.end' changes, so 'chunk' changes too) +load(paste0(workdir,"/chunk_",var.name,".RData")) + +i=0 +for(y in year.start:year.end){ + #y<-1981 # for the debug + i<-i+1 + print(paste0("Year #",i,"/", year.tot)) + + n.days <- n.days.in.a.year(y) + if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days + + var <- Load(var.name, NULL, list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') # load var data for the year y only + #PlotEquiMap(var$obs[1, 1, 1, 1, , ], var.lon, var.lat, filled.continents = FALSE) + + # open the ff binary files with all the WT classification for that year: + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs1_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs1_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs2_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs2_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs3_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs3_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs4_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs4_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs5_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs5_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs6_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs6_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs7_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs7_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs8_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs8_",y)), overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs9_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs9_",y)), 
overwrite=TRUE) + if(file.exists(paste0(workdir,"/",paste0(y,"_RData/WTs10_",y,".RData")))) ffload(file=paste0(workdir,"/",paste0(y,"_RData/WTs10_",y)), overwrite=TRUE) + + var.serie <- wt.serie <- array(NA,c(var.n.lat.used, var.n.lon.used, n.days)) # arrays where to store the var and wt daily data of year y for all points + + j<-0 + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1] # for the debug + j<-j+1 + vlat.pos <- vlat.pos+1 + cat('Latitude ',j,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 + for(vlon in var.lon.used) { + vlon.pos <- vlon.pos+1 + + var.serie[vlat.pos, vlon.pos,] <- var$obs[1,1,1,,which(var.lat==vlat),which(var.lon==vlon)] + + #WT <- read.table(file=paste0(workdir,"/","10WTs_",psl.rean,"_",year.start,"-",year.end,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".txt"), header=TRUE, sep=" ", stringsAsFactors=FALSE, row.names=NULL) + + # load WTs classifications (variable WTs) for that year created with WT_vX.R: + #load(paste0(workdir,"/",y,"_Rdata/","10WTs_",psl.rean,"_year_",y,"_lat_",closer.psl.lat[vlat.pos,vlon.pos],"_lon_",closer.psl.lon[vlat.pos,vlon.pos],".RData")) + open(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + #open(eval(parse(text=gsub("-","m",paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos]))))) + + + # interval of days belonging to the year y, but starting to count from the year year.begin: + #seq.days.year <- n.days.in.a.yearly.period(year.start,y) - n.days.in.a.yearly.period(year.start,year.start) + 1:n.days.in.a.year(y) + WTs <- as.ram(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + wt.serie[vlat.pos, vlon.pos,] <- WTs #[seq.days.year] #,4] # extract only the days of the year y + close(eval(parse(text=paste0("WTs_lat_", closer.psl.lat[vlat.pos,vlon.pos],"_lon_", closer.psl.lon[vlat.pos,vlon.pos])))) + rm(WTs) + } + + } + + #PlotEquiMap(var.serie[,,1], var.lon, var.lat.used, filled.continents = FALSE) # for the debug + + #save(var.serie, file=paste0(workdir,"/local_",var.name,"_",psl.rean,"_year_",y,".RData"), compress=FALSE) + #save(wt.serie, file=paste0(workdir,"/local_","10WTs_",psl.rean,"_year_",y,".RData"), compress=FALSE) + + #year=4000 + #ffload(file=paste0(workdir,"/WTs_",year)) + #latc <- 70 + #lonc <- 0 + #open(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must + #close(eval(parse(text=paste0("WTs_lat_",latc,"_lon_", lonc)))) # do.call doesn't work with open, must use eval(parse(text=...))) + + + # instead of saving only 1 file for all the spatial domain, save 1 file for each chunk c: (to be able to load them faster in the following step) + if(!dir.exists(paste0(workdir,"/",var.name,"_chunk"))) dir.create(paste0(workdir,"/",var.name,"_chunk")) + if(!dir.exists(paste0(workdir,"/wt_chunk"))) dir.create(paste0(workdir,"/wt_chunk")) + + for(c in 1:chunk$n.chunk){ + var.serie.year.chunk<-var.serie[,chunk$int[[c]],] # format: [lat, lon, day] + save(var.serie.year.chunk, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE) + + wt.serie.year.chunk <- wt.serie[,chunk$int[[c]],] + save(wt.serie.year.chunk, file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData"), compress=FALSE) + # 
n.wt10<-apply(wt.serie.year.chunk,c(1,2),function(x)length(which(x==18))) # for the debug + # edit(n.wt10) # for the debug + } + +} # close for on y + +#rm(closer.psl.lat, closer.psl.lon, var) + + +# compute the climatology of var for each period and its daily anomalies for each chunk and period: +var.clim <- array(NA, c(n.periods, var.n.lat.used, var.n.lon.used)) + +n.days.tot <- n.days.in.a.yearly.period(year.start,year.end) +#n.days.tot <- 365 # for the debug + +days.period <- n.days.period <- list() +for(p in periods){ + days.period[[p]] <- NA + for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + + days.period[[p]] <- days.period[[p]][-1] # remove the NA at the begin that was introduced only to be able to execture the above command + n.days.period[[p]] <- length(days.period[[p]]) +} + +for(c in 1:chunk$n.chunk){ + #c=1 # for the debug + cat(paste0("Computing chunk n. ", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + for(p in periods){ + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] # select only the days in the chosen period + + var.clim.chunk <- apply(var.serie.chunk.period,c(1,2),mean, na.rm=T) + + var.anom.chunk.period <- var.serie.chunk.period - InsertDim(var.clim.chunk, 3, n.days.period[[p]]) + var.clim[p,,chunk$int[[c]]] <- var.clim.chunk + + # save the var anomaly, one file for each chunk and period: + assign(paste0("var.anom.chunk.period",p), var.anom.chunk.period) + + # save(, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE) + # save not working in this case, use do.call below: + do.call(save, list(paste0("var.anom.chunk.period",p), file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData"), compress=FALSE)) # you must use this syntax when saving an Rdata with the variable name given by a string!!! + + rm(var.anom.chunk.period,var.serie.chunk.period) + do.call(rm, list(paste0("var.anom.chunk.period",p))) + } + + rm(var.serie.chunk) +} + +save(var.clim, file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) # save var.clim +load(paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_climatology_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +# do the same as above, but for the wt: +wts <- c(1:10) # weather type numbers inside wt.serie +n.wts<-length(wts) +n.wt <- var.wt.sum <- var.wt.mean.anom <- var.wt.sd.anom <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used)) +#vlat.mat <- vlon.mat <- array(NA, c(var.n.lat.used, var.n.lon.used)) # for the debug + +for(c in 1:chunk$n.chunk){ + #s=1;y=year.start # for the debug + cat(paste0("Computing chunk n. 
", c,"/", chunk$n.chunk),'\r') + + if(c == chunk$n.chunk) { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size.last, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size.last)) + } else { + var.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + wt.serie.chunk <- array(NA, c(var.n.lat.used, chunk$chunk.size, n.days.tot)) + n.wt.chunk <- var.wt.sum.chunk <- var.wt.mean.anom.chunk <- var.wt.sd.anom.chunk <- array(NA,c(n.periods, n.wts, var.n.lat.used, chunk$chunk.size)) + } + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load var.serie.year.chunk + int.days.year <- seq.days.in.a.future.year(year.start,y) + var.serie.chunk[,,int.days.year] <- var.serie.year.chunk + rm(var.serie.year.chunk) + } + + + for(y in year.start:year.end){ + load(file=paste0(workdir,"/",var.name,"_wt_chunk/local_","10WTs_",psl.rean.name,"_year_",y,"_chunk_",c,".RData")) # load wt.serie.year.chunk + + wt.serie.year.chunk[which(wt.serie.year.chunk==18)]<-10 # rename the A type from number 18 to number 10, to have only the numbers from 1 to 10 + int.days.year <- seq.days.in.a.future.year(year.start,y) + + wt.serie.chunk[,,int.days.year] <- wt.serie.year.chunk #[,chunk$int[[c]],] + rm(wt.serie.year.chunk) + } + + for(p in periods){ + # load var.anom.chunk.period + load(file=paste0(workdir,"/",var.name,"_chunk/local_",var.name,"_anomaly_",psl.rean.name,"_",year.start,"-",year.end,"_chunk_",c,"_period_",p,".RData")) + var.serie.chunk.period <- var.serie.chunk[,,days.period[[p]]] + wt.serie.chunk.period <- wt.serie.chunk[,,days.period[[p]]] + + vlat.pos <- 0 + for (vlat in var.lat.used) { + #vlat<-var.lat.used[1]; vlon<-var.lon.used[1]; wt=1 # for the debug + vlat.pos <- vlat.pos + 1 + #cat('Latitude ',vlat.pos,'/',var.n.lat.used,'\r') + + vlon.pos <- 0 #chunk$int[[c]]-1 + for(vlon in var.lon.used[chunk$int[[c]]]) { + vlon.pos <- vlon.pos + 1 + for(wt in wts){ + pos.wt <- which(wt.serie.chunk.period[vlat.pos,vlon.pos,]==wt) + var.anom.chunk.period <- get(paste0("var.anom.chunk.period",p)) + + n.wt.chunk[p, wt, vlat.pos, vlon.pos] <- length(pos.wt) + var.wt.sum.chunk[p, wt, vlat.pos, vlon.pos] <- sum(var.serie.chunk.period[vlat.pos,vlon.pos,][pos.wt]) # only for wind and prec + var.wt.mean.anom.chunk[p, wt, vlat.pos, vlon.pos] <- mean(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + var.wt.sd.anom.chunk[p, wt, vlat.pos, vlon.pos] <- sd(var.anom.chunk.period[vlat.pos,vlon.pos,][pos.wt]) + + rm(pos.wt, var.anom.chunk.period) + } + } + } + + n.wt[p,,,chunk$int[[c]]] <- n.wt.chunk[p,,,] + var.wt.sum[p,,,chunk$int[[c]]] <- var.wt.sum.chunk[p,,,] + var.wt.mean.anom[p,,,chunk$int[[c]]] <- var.wt.mean.anom.chunk[p,,,] + var.wt.sd.anom[p,,,chunk$int[[c]]] <- var.wt.sd.anom.chunk[p,,,] + } # close for on p +} + +save(n.wt, var.wt.sum, var.wt.mean.anom, var.wt.sd.anom.chunk, file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) +load(file=paste0(workdir,"/",var.name,"_chunk/output_",psl.rean.name,"_",year.start,"-",year.end,".RData")) + +#if(var.name=='prlr'){ # if it is not in m/s (as in ERA-Interim) but it is in kg/m2/s, you must multiply for 86400 to convert to mm/day +# var.clim <- var.clim * 86400 +# var.wt.sum <- var.wt.sum * 86400 
+# var.wt.mean.anom <- var.wt.mean.anom * 86400 +# var.wt.sd.anom <- var.wt.sd.anom * 86400 +#} + +var.wt.mean.contrib <- array(NA, c(n.periods, n.wts, var.n.lat.used, var.n.lon.used)) + +var.wt.mean <- var.wt.sum/n.wt # average value of var associated to a WT +#n.wt <- 100*n.wt/n.days.tot # convert to yearly frequency in % +for(p in periods) n.wt[p,,,] <- 100*n.wt[p,,,]/n.days.period[[p]] # convert to yearly frequency in % +for(p in periods) var.wt.mean.contrib[p,,,] <- 100*var.wt.sum[p,,,]/(InsertDim(var.clim[p,,],1,n.wts)*n.days.period[[p]]) # convert the sum to a % contribution + +var.clim.NA <- array(NA, c(n.periods, var.n.lat, var.n.lon.used)) +n.wt.NA <- var.wt.mean.contrib.NA <- var.wt.sum.NA <- var.wt.mean.anom.NA <- var.wt.mean.NA <- var.wt.sd.anom.NA <- array(NA, c(n.periods, n.wts, var.n.lat, var.n.lon.used)) + +var.clim.NA[,pos.var.lat.used,] <- var.clim +n.wt.NA[,,pos.var.lat.used,] <- n.wt +var.wt.mean.NA[,,pos.var.lat.used,] <- var.wt.mean +var.wt.sum.NA[,,pos.var.lat.used,] <- var.wt.sum +var.wt.mean.anom.NA[,,pos.var.lat.used,] <- var.wt.mean.anom +var.wt.mean.contrib.NA[,,pos.var.lat.used,] <- var.wt.mean.contrib +var.wt.sd.anom.NA[,,pos.var.lat.used,] <- var.wt.sd.anom + +# remove the equatorial area from visualization: +n.wt.NA[,,var.pos.lat.unused.eq,]<- var.wt.mean.NA[,,var.pos.lat.unused.eq,] <- var.wt.sum.NA[,,var.pos.lat.unused.eq,] <- var.wt.mean.contrib.NA[,,var.pos.lat.unused.eq,] <- NA +var.wt.mean.anom.NA[,,var.pos.lat.unused.eq,] <- var.wt.sd.anom.NA[,,var.pos.lat.unused.eq,] <- NA + +# move Europe to the center of the maps: +p1 <- ceiling(var.n.lon.used/2)-1 +p2 <- ceiling(var.n.lon.used/2) +p3 <- var.n.lon.used + +var.clim.NA.bis <- var.clim.NA[,,c(p2:p3,1:p1)] +n.wt.NA.bis <- n.wt.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.NA.bis <- var.wt.mean.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.contrib.NA.bis <- var.wt.mean.contrib.NA[,,,c(p2:p3,1:p1)] +var.wt.mean.anom.NA.bis <- var.wt.mean.anom.NA[,,,c(p2:p3,1:p1)] +var.wt.sd.anom.NA.bis <- var.wt.sd.anom.NA[,,,c(p2:p3,1:p1)] +gc() + +# Map intervals and colors: +my.brks <- list() + +#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology +my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology + +#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT +my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT +my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT +my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var +my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT for wind speed +my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT for temperature +my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation + + +my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies pf a WT + +my.cols <- list() +#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1) +#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-green-white-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- 
colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors + +my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + + +# Create and save maps: + +index <- 1 +#p=17 # for the debug + +for(p in periods){ + png(filename=paste0(mapdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #layout.show(2) + #PlotEquiMap2(var.clim.NA, var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) + # change longitude in format -180,180 to displey europe in the middle of the map: + #PlotEquiMap(var.clim[p,,], var.lon.used, var.lat.used, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + + # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + PlotEquiMap(var.clim.NA.bis[p,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + +index=2 +#p=17 # for the debug +wt=6 + +for(p in periods){ + 
png(filename=paste0(mapdir,"/Average_10-m_Wind_Speed_WT_", WTs.type10[wt],period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.wt.mean.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA[wt,,100:200], var.lon.used[100:200], var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + PlotEquiMap(var.wt.mean.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + #PlotEquiMap(var.wt.mean.NA.bis[wt,,1:50], head(var.lon.used.bis,50), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +index=3 +p=17 # for the debug +wt=6 + +png(filename=paste0(mapdir,"/Frequency_WT_", WTs.type10[wt],".png"),width=1000,height=700) + +layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +#PlotEquiMap(n.wt.NA[wt,,], var.lon.used, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#PlotEquiMap(n.wt.NA[wt,,1:18], tail(var.lon.used,18), var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F) +#n.wt.NA[wt,50:55,490:512] +#ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) +#n.wt10<-apply(wt.serie.,c(1,2),function(x)length(which(x==18))) # for the debug +#edit(n.wt.NA[10,,]) # for the debug +#layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) +PlotEquiMap(n.wt.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], drawleg=F, colNA="gray") +ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=5, cex=1) + +dev.off() + + +index=4 +p=17 +wt=6 # for the debug + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/Daily_contribution_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(var.wt.mean.contrib.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], cols=my.cols[[index]], + drawleg=F, colNA="gray", toptitle=WTs.type10.name[wt] , sizetit=1) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=10, cex=1) + + dev.off() +} + + +# Average anomalies associated to a WT: +# WARNING: for temerature, you must remove -273 after var.wt.mean.anom.NA.bis[p,wt,,] to convert to anomalies in degrees +# for precipitation, you must multiply it for 86400 +index=5 +#p=17 # for the debug +#wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=TRUE # put TRUE to show only a zoom over Europe; false otherwise +map.only=FALSE # put TRUE if you want to print only the image with no legend or title (for posters) + +domain <- ifelse(europe, "European", "World") + +for(p in periods){ + for(wt in 1:n.wts){ + if(!slide) {n.box <- 10; col.cex=1; my.title <- paste(WTs.type10.name[wt],period.name[p])} else {n.box <- 5; col.cex=3; my.title <- ""} + if(map.only) my.title <- "" + + png(filename=paste0(mapdir,"/",var.name.file,"_Mean_",domain,"_Anomalies_of_WT_", WTs.type10[wt],"_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = 
TRUE)) + if(!europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.mean.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide && map.only) title(paste0(WTs.type10.name[wt],period.name[p]),cex.main=3.5,outer=T) + + dev.off() + } + +} + + + + +index=6 +wt=6 # for the debug +slide=FALSE # put TRUE if you wan to print maps for a slide, with bigger titles and legends, false otherwise +europe=FALSE # put TRUE to show only a zoom over Europe; false otherwise + +if(!slide) {n.box <- 10; col.cex=1; my.title <- WTs.type10.name[wt]} else {n.box <- 5; col.cex=3; my.title <- ""} +domain <- ifelse(europe, "European", "World") + +for(wt in 1:n.wts){ + png(filename=paste0(mapdir,"/StDev_",domain,"_Anomalies_of_WT_", WTs.type10[wt],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,n.box),2), n.box+1, 1, byrow = TRUE)) + + if(!europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,,], var.lon.used.bis, var.lat, filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + if(europe) PlotEquiMap(var.wt.sd.anom.NA.bis[p,wt,25:90,220:310], var.lon.used.bis[220:310], var.lat[25:90], filled.continents = FALSE, brks=my.brks[[index]], + cols=my.cols[[index]], drawleg=F, colNA="gray", toptitle=my.title, sizetit=1.5) + ColorBar(my.brks[[index]], cols=my.cols[[index]], vert=FALSE, subsampleg=2, cex=col.cex) + if(slide) title(WTs.type10.name[wt],cex.main=3.5,outer=T) + + dev.off() +} + +#WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") + +save.image(file=paste0(workdir,"/Weather_types",var.name,"_",psl.rean.name,"_",year.start,"-",year.end,".RData"), compress=FALSE) diff --git a/old/WT_drivers_v6.R b/old/WT_drivers_v6.R new file mode 100644 index 0000000000000000000000000000000000000000..f7fb917fbd897d06d0f82d020bcf14c24c8f1acd --- /dev/null +++ b/old/WT_drivers_v6.R @@ -0,0 +1,488 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +library(s2dverification) # for the function Load() + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +# directory where to find the WT classification +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT" + +# reanalysis used for daily var data (must be the same as the mslp data): +var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + + # any daily variable: +var.name='tas' #'tas' #'sfcWind' #'prlr' +var.name.file='Temperature' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's filename + +periods=1:17 # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values + +LOESS <- TRUE # climatology filter on/off (if off, daily climatology is computed instead and used to measure daily anomalies) + 
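+# Illustrative aside: the 1-17 period numbering declared above, as a toy lookup
+# (the real day selection is done by pos.period() from Rfunctions.R; this sketch
+# only documents the convention):
+#   period.kind <- function(p) {
+#      if(p %in% 1:12)  return(month.name[p])                                # 1-12: months
+#      if(p %in% 13:16) return(c("Winter","Spring","Summer","Autumn")[p-12]) # 13-16: seasons
+#      return("Yearly")                                                      # 17: whole year
+#   }
+#   period.kind(13) # "Winter"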
+########################################################################################## +n.periods <- length(periods) + +my.brks <- my.cols <- my.labels <- my.unit <- list() +my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD +my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1) +my.unit.freq <- "%" + +if(var.name == 'tas') var.num <- 1 +if(var.name == 'sfcWind') var.num <- 2 +if(var.name == 'prlr') var.num <- 3 + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +#year.tot <- year.end - year.start + 1 + +# load WT metadata to get the info on year.start and year.end +#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) +load(paste0(workdir,"/txt/ERAint/metadata.RData")) + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8) +#var <- Load(var.name, NULL, list(var.rean), paste0(2001,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=8) + +var.lat <- var$lat # var lat and lon MUST be the same of WT classification, even if the latter can have NA for certain lat values. +var.lon <- var$lon + +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + +# convert var to daily anomalies: +cat("Calculating anomalies. 
Please wait...\n") + +var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + +if(LOESS == TRUE){ + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, year.end-year.start+1) + + rm(var365ClimLoess) + gc() + +} else { + var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + + rm(var365ClimDaily) + gc() +} + + +var365Anom <- var365 - var365Clim + +rm(var365Clim) +gc() + +rm(var365) +gc() + +# save var anomalies for retreiving them when necessary: +save(var365Anom,var.lat, var.lon, n.lat.var, n.lon.var, file=paste0(workdir,"/var365Anom.RData")) + + + + + + + + +# Impact of a WT on var (average of var only during the days belonging to a particular WT): + +# load lat and lon from SLP grid: +load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used + +# only for compatibility with older versions (it should be already loaded): +lat <- round(MSLP$lat,3) +lon <- round(MSLP$lon,3) + +# load daily WT classification for all years and grid points and with the same format of var365Anom: +load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load lat.used and lon.used + +n.WTs <- 10 # length(unique(WTs)) +wt.codes <- unique(WTs[1,1,,]) + +# load var anomalies: +load(file=paste0(workdir,"/var365Anom.RData")) + +my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select WT data only inside period p: + WTsPeriod <- WTs[,pos.period(1,p),,] + + for(wt in 1:n.WTs){ + + wt.code <- wt.codes[wt] # i.e: for 10 WTs, their codes are. 
+
+
+
+
+
+
+# Impact of a WT on var (average of var only during the days belonging to a particular WT):
+
+# load lat and lon from the SLP grid:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# only for compatibility with older versions (it should be already loaded):
+lat <- round(MSLP$lat,3)
+lon <- round(MSLP$lon,3)
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load WTs
+
+n.WTs <- 10 # length(unique(WTs))
+wt.codes <- unique(WTs[1,1,,])
+
+# load var anomalies:
+load(file=paste0(workdir,"/var365Anom.RData"))
+
+my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors for temperature
+
+for(p in periods){
+  # select var data only inside period p:
+  varPeriod <- var365Anom[,pos.period(1,p),,]
+
+  # select WT data only inside period p:
+  WTsPeriod <- WTs[,pos.period(1,p),,]
+
+  for(wt in 1:n.WTs){
+
+    wt.code <- wt.codes[wt] # i.e. for 10 WTs, their codes are: 1 2 3 4 5 6 7 8 9 18
+
+    # remove from varPeriod the days not belonging to that wt (setting its value to NA):
+    WTsPeriod.wt <- WTsPeriod
+    ss <- which(WTsPeriod == wt.code)
+    pp <- which(WTsPeriod != wt.code)
+
+    WTsPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wt
+    WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+
+    varPeriod.wt <- varPeriod * WTsPeriod.wt
+
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+
+    # visualize impact map of the wt on var:
+    png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_",period.name[p],".png"),width=1000,height=700)
+
+    PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+
+    dev.off() # close the png device
+  }
+}
+
+
+
+
+
+# Compute wind directions:
+year.start <- 1985
+year.end <- 2014
+
+n.years <- year.end - year.start + 1
+
+uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+
+windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees: atan2(uas,vas), with the arguments in this order, gives the angle clockwise from north,
+                                        # and the +180 shifts from the direction the wind is blowing towards to the direction it is coming from
+rm(uas, vas)
+gc()
+
+# remove leap days from windir, to compare it with var, which has no leap days:
+cat("Removing leap days. Please wait...\n")
+windir365 <- windir[,,,1:365,,,drop=FALSE]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # shift March-December back one position, dropping the 29th of February
+}
+
+
+rm(windir)
+gc()
+
+dir2 <- which(windir365 > 22.5)
+dir3 <- which(windir365 > 22.5 + 45)
+dir4 <- which(windir365 > 22.5 + 90)
+dir5 <- which(windir365 > 22.5 + 135)
+dir6 <- which(windir365 > 22.5 + 180)
+dir7 <- which(windir365 > 22.5 + 225)
+dir8 <- which(windir365 > 22.5 + 270)
+dir1 <- which(windir365 > 22.5 + 315)
+
+windirClass <- array(1, dim(windir365)) # N
+windirClass[dir2] <- 2 # NE
+windirClass[dir3] <- 3 # E
+windirClass[dir4] <- 4 # SE
+windirClass[dir5] <- 5 # S
+windirClass[dir6] <- 6 # SW
+windirClass[dir7] <- 7 # W
+windirClass[dir8] <- 8 # NW
+windirClass[dir1] <- 1 # N
+
+rm(windir365)
+rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8)
+gc()
+
+# save it once to retrieve it later:
+save(windirClass, file=paste0(workdir,"/windirClass.RData"))
+
+
+# save wind direction mean frequency maps:
+for(p in periods){
+  # select wind direction data only inside period p:
+  windirPeriod <- windirClass[1,1,,pos.period(1,p),,]
+
+  for(wd in 1:8){
+
+    # remove from windirPeriod the days not belonging to that wd (setting its value to NA):
+    windirPeriod.wt <- windirPeriod
+
+    ss <- which(windirPeriod == wd)
+    pp <- which(windirPeriod != wd)
+
+    windirPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wd
+    windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wd
+    rm(ss,pp)
+    gc()
+
+    # mean frequency maps:
+    windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE)
+    windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1))
+
+    my.fileout <- paste0(workdir,"/windir/frequency_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
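+    # NOTE (editor): a minimal check of the direction convention and the 8-sector split above.
+    # atan2(uas,vas), with the arguments in this order, already measures the angle clockwise
+    # from north, and +180 turns "blowing towards" into "coming from". Toy values only.
+    if(FALSE){
+      uas <- c(0, 1); vas <- c(-1, 0)                     # wind from the north; from the west
+      windir <- (180/pi)*atan2(uas,vas) + 180             # 360 (i.e. N) and 270 (W)
+      cls <- rep(1, length(windir))                       # default class 1 = N
+      for(k in 1:7) cls[windir > 22.5 + (k-1)*45] <- k+1  # same cut-points as above
+      cls[windir > 337.5] <- 1                            # wrap 337.5-360 back to N
+      print(cls)                                          # 1 7
+    }
+ 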
layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + # mean frequency maps for North Pole: + my.fileout <- paste0(workdir,"/windir/frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout) + + } # close for on wd + +} # close for on p + +# wind speed interannual frequencies: + + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction): + +# load var anomalies and wind direction array: +load(file=paste0(workdir,"/var365Anom.RData")) +load(paste0(workdir,"/windirClass.RData")) + +#my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean anomaly of a WT for temperature +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.labels[[1]] <- my.brks[[1]] #c(c("-10",my.brks[[1]][-1])[-length(my.brks[[1]])],"10") +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +wd.dir <- c("N","NE","E","SE","S","SW","W","NW") + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[1,1,,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt[,lon.swapped],-10,10), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.labels[[var.num]], label.dist=.5) + dev.off() + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- 
paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(rescale(var.mean.wt,-10,10), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+
+
+    # same maps but removing points with frequency < 3%:
+    windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1))
+
+    ss <- which(windirFreq.wt < 0.03)
+    pp <- which(windirFreq.wt >= 0.03)
+
+    windirFreq.wt[ss] <- NA
+    windirFreq.wt[pp] <- 1
+
+    var.mean.wt2 <- var.mean.wt * windirFreq.wt
+    rm(ss,windirFreq.wt)
+    gc()
+
+
+
+    # visualize impact map of the wd on var:
+    my.fileout <- paste0(workdir,"/windir/impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+    PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],-10,10), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.labels[[var.num]], label.dist=.5)
+    dev.off()
+
+    # North Pole map:
+    #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"))
+    my.fileout <- paste0(workdir,"/windir/impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(rescale(var.mean.wt2,-10,10), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+  } # close for on wd
+
+} # close for on p
+
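+# NOTE (editor): a minimal sketch of the frequency masking used above: grid points where a
+# wind direction occurs on fewer than 3% of the days are set to NA, so they are grayed out
+# in the impact map. Toy 2x2 example, illustrative names only.
+if(FALSE){
+  freq <- matrix(c(0.01, 0.05, 0.20, 0.002), 2, 2)  # per-gridpoint frequency of one direction
+  mask <- ifelse(freq < 0.03, NA, 1)                # NA below the 3% threshold, 1 otherwise
+  impact <- matrix(1:4, 2, 2)
+  print(impact * mask)                              # masked impact map
+}
+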
+
+
+
+# Create and save monthly/seasonal/yearly climatology maps of var (note: uses var365 from the anomaly section above, so skip the rm(var365) step there if you run this part):
+
+#p=17 # for the debug
+
+# Map intervals and colors:
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors (breaks of element 2, to match its own intervals)
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+mod.var <- 0
+if(var.num == 1) mod.var <- -273.15 # for temperature, convert from Kelvin to degrees Celsius
+
+for(p in periods){
+  # Select only days of the chosen month/season:
+  varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE]
+  varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE)
+
+  png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+  #PlotEquiMap(varPeriodMean[1,1,,]+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+  PlotEquiMap(varPeriodMean[1,1,,lon.swapped]+mod.var, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F)
+  ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
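+# NOTE (editor): a minimal sketch of the longitude reordering used above: a 0..360 grid is
+# split and the part in [180,360) is shifted to negative longitudes, so maps are drawn on a
+# -180..180 axis. Toy 8-point grid, illustrative names only.
+if(FALSE){
+  lon <- seq(0, 315, by=45)                            # 0 45 90 135 180 225 270 315
+  n.lon <- length(lon)
+  n.lonr <- n.lon - length(lon[lon>0 & lon<180])       # points outside (0,180): here 5
+  swapped <- c((n.lonr+1):n.lon, 1:n.lonr)             # new column order for the data
+  lon2 <- c(lon[(n.lonr+1):n.lon]-360, lon[1:n.lonr])  # -135 -90 -45 0 45 90 135 180
+  print(lon2)
+}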
colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + diff --git a/old/WT_drivers_v6.R~ b/old/WT_drivers_v6.R~ new file mode 100644 index 0000000000000000000000000000000000000000..e85f4fc38c0fab88af12889ec8b6c9d6204eee15 --- /dev/null +++ b/old/WT_drivers_v6.R~ @@ -0,0 +1,381 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +library(s2dverification) # for the function Load() + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +# directory where to find the WT classification +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT" + +# reanalysis used for daily var data (must be the same as the mslp data): +var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + + # any daily variable: +var.name='tas' #'tas' #'sfcWind' #'prlr' +var.name.file='Temperature' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's filename + +periods=1:17 # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values + +LOESS <- TRUE # climatology filter on/off (if off, daily climatology is computed instead and used to measure daily anomalies) + +########################################################################################## +n.periods <- length(periods) + +my.brks <- list() +my.cols <- list() +if(var.name == 'tas') var.num <- 1 +if(var.name == 'sfcWind') var.num <- 2 +if(var.name == 'prlr') var.num <- 3 + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +#year.tot <- year.end - year.start + 1 + +# load WT metadata to get the info on year.start and year.end +#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) +load(paste0(workdir,"/txt/ERAint/metadata.RData")) + +#year.start <- min(WTs_file1$Year) +#year.end <- max(WTs_file1$Year) + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8) +#var <- Load(var.name, NULL, list(var.rean), paste0(2001,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', 
nprocs=8) + +var.lat <- var$lat # var lat and lon MUST be the same of WT classification, even if the latter can have NA for certain lat values. +var.lon <- var$lon + +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + +# convert var to daily anomalies: +cat("Calculating anomalies. Please wait...\n") + +var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + +if(LOESS == TRUE){ + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, year.end-year.start+1) + + rm(var365ClimLoess) + gc() + +} else { + var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + + rm(var365ClimDaily) + gc() +} + + +var365Anom <- var365 - var365Clim + +rm(var365Clim) +gc() + +rm(var365) +gc() + +# save var anomalies for retreiving them when necessary: +save(var365Anom,var.lat, var.lon, file=paste0(workdir,"/var365Anom.RData")) + + + + + + + + +# Impact of a WT on var (average of var only during the days belonging to a particular WT): + +# load lat and lon from SLP grid: +load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used + +# only for compatibility with older versions (it should be already loaded): +lat <- round(MSLP$lat,3) +lon <- round(MSLP$lon,3) + +# load daily WT classification for all years and grid points and with the same format of var365Anom: +load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load lat.used and lon.used + +n.WTs <- 10 # length(unique(WTs)) +wt.codes <- unique(WTs[1,1,,]) + +# load var anomalies: +load(file=paste0(workdir,"/var365Anom.RData")) + +my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select WT data only inside period p: + WTsPeriod <- WTs[,pos.period(1,p),,] + + for(wt in 1:n.WTs){ + + wt.code <- wt.codes[wt] # i.e: for 10 WTs, their codes are. 
1 2 3 4 5 6 7 8 9 18 + + # remove from varPeriod the days not belonging to that wt (setting its value to NA): + WTsPeriod.wt <- WTsPeriod + ss <- which(WTsPeriod == wt.code) + pp <- which(WTPeriod != wt.code) + + WTsPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + + varPeriod.wt <- varPeriod * WTsPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + # visualize impact map of the wt on var: + png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_"period.name[p],".png"),width=1000,height=700) + + PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + + + + } +} + + + + + + + + + + + + + + + + + + +# Impact of a wind direction on var (average of var only during the days belonging to a particular wind direction): +year.start <- 1985 +year.end <- 2014 + +uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs +vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs + +windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees; 90 is necessary to shift from trigonometric system to cardinal system and +180 to shift + # from the direction wind is blowing to the direction wind is coming from +rm(uas, vas) +gc() + +# remove bisestile days from windir, to compare it with var, which has no bisestiles: +cat("Removing bisestile days. Please wait...\n") +windir365 <- windir[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + + +rm(windir) +gc() + +dir2 <- which(windir365 > 22.5) +dir3 <- which(windir365 > 22.5 + 45) +dir4 <- which(windir365 > 22.5 + 90) +dir5 <- which(windir365 > 22.5 + 135) +dir6 <- which(windir365 > 22.5 + 180) +dir7 <- which(windir365 > 22.5 + 225) +dir8 <- which(windir365 > 22.5 + 270) +dir1 <- which(windir365 > 22.5 + 315) + +windirClass <- array(1, dim(windir365)) # N +windirClass[dir2] <- 2 # NE +windirClass[dir3] <- 3 # E +windirClass[dir4] <- 4 # SE +windirClass[dir5] <- 5 # S +windirClass[dir6] <- 6 # SW +windirClass[dir7] <- 7 # W +windirClass[dir8] <- 8 # NW +windirClass[dir1] <- 1 # N + +rm(windir365) +rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8) +gc() + +my.labels <- list() +my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean anomaly of a WT for temperature +my.labels[[1]] <- c(c("-10",my.brks[[1]][-1])[-length(my.brks[[1]])],"10") + +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature + +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +wd.dir <- c("N","NE","E","SE","S","SW","W","NW") + +save(windirClass, file=paste0(workdir,"/windirClass.RData")) + +# load var anomalies: +load(file=paste0(workdir,"/var365Anom.RData")) + +for(p in periods){ + # select var data only 
inside period p: + varPeriod <- var365Anom[1,1,,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + # visualize impact map of the wt on var: + png(filename=paste0(workdir,"/windir/impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(var.mean.wt[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.labels[[var.num]], label.dist=.5) + PlotStereoMap(var.mean.wt, var.lon, var.lat) + + dev.off() + + } + + +} + + +# Create and save monthly/seasonal/yearly climatology maps of var: + +#p=17 # for the debug + +# Map intervals and colors: +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology +my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology + +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed + +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +mod.var <- 0 +if(var.num == 1) mod.var <- -273.5 # for temperature, you must remove -273 after var.clim.NA.bis[p,,]: + +for(p in periods){ + # Select only days of the chosen month/season: + varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE] + varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE) + + png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + + #PlotEquiMap(varPeriodMean[1,1,,]+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(varPeriodMean[1,1,,lon.swapped]+mod.num, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F) + ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + + + + + + + + + + +#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology +my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology + +#my.brks[[2]] <- 
c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT +my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT +my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT +my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var +my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT for wind speed +my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT for temperature +my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation + + +my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies pf a WT + +my.cols <- list() +#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1) +#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-green-white-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors + +my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # 
blue-yellow-red colors
 
diff --git a/old/WT_drivers_v7.R b/old/WT_drivers_v7.R
new file mode 100644
index 0000000000000000000000000000000000000000..78553888a44b887b755c3e3caa56fd925b725b9d
--- /dev/null
+++ b/old/WT_drivers_v7.R
@@ -0,0 +1,527 @@
+##########################################################################################
+# Relationship between WTs and a chosen climate variable                                 #
+##########################################################################################
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind) # for the function abind()
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+# directory where to find the WT classification
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT"
+
+# reanalysis used for daily var data (must be the same as the mslp data):
+var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+#var.rean <- list(path = '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+rean.name <- 'erai' #'ncep' #'erai'
+
+# any daily variable:
+var.name='sfcWind' #'tas' #'sfcWind' #'prlr'
+var.name.file='10-m Wind Speed' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the output map's title
+
+periods=1:17 # identify monthly (from 1=Jan to 12=Dec), seasonal (from 13=winter to 16=Autumn) and yearly=17 values
+
+LOESS <- FALSE # LOESS climatology filter on/off (if off, a 5-day moving window is used to measure daily anomalies)
+
+year.start <- 1985
+year.end <- 2016
+
+##########################################################################################
+n.periods <- length(periods)
+n.years <- year.end - year.start + 1
+
+my.brks <- my.cols <- my.labels <- my.unit <- list()
+my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD
+my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1)
+my.unit.freq <- "%"
+
+if(var.name == 'tas') var.num <- 1
+if(var.name == 'sfcWind') var.num <- 2
+if(var.name == 'prlr') var.num <- 3
+
+WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A")
+WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)","Northerly (N)","Cyclonic (C)","Anticyclonic (A)")
+wd.dir <- c("N","NE","E","SE","S","SW","W","NW")
+n.wd <- length(wd.dir)
+
+#year.tot <- year.end - year.start + 1
+
+# load WT metadata to get the info on year.start and year.end
+#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years"))
+#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE)
+#load(paste0(workdir,"/txt/ERAint/metadata.RData"))
+
+# load daily var data for all years:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)
+#var <- Load(var.name, NULL, list(var.rean), paste0(2001,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=8)
+
+var.lat <- var$lat # var lat and lon must be the same as those of the WT classification, even if the latter can have NA for certain lat values.
+var.lon <- var$lon + +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + +# convert var to daily anomalies: +cat("Calculating anomalies. Please wait...\n") + +if(LOESS == TRUE){ + var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T) + var365ClimLoess <- var365ClimDaily + + for(i in 1:n.lat.var){ + for(j in 1:n.lon.var){ + my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + var365ClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(var365ClimDaily, my.data, my.loess) + gc() + + var365Clim <- InsertDim(var365ClimLoess, 3, year.end-year.start+1) + + rm(var365ClimLoess) + gc() + +} else { # apply a 5-days mobile window: + + #var365Clim <- InsertDim(var365ClimDaily, 3, n.years) + var365Clim <- array(NA,c(365,dim(var365)[5:6])) + var365x3 <- abind(var365,var365,var365,along=4) + + for(d in 1:365){ + window <- var365x3[1,1,,365+d+seq(-2,2),,drop=FALSE] + var365Clim[d,,] <- apply(window,c(5,6),mean,na.rm=TRUE) + } + + rm(var365x3) + gc() + + var365Clim <- InsertDim(var365Clim, 1, n.years) + var365Clim <- InsertDim(InsertDim(var365Clim,1,1),1,1) +} + + +var365Anom <- var365 - var365Clim + +rm(var365Clim) +gc() + +rm(var365) +gc() + +# save var anomalies for retreiving them when necessary: +save(var365Anom,var.lat, var.lon, n.lat.var, n.lon.var, file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) + + + + + + + + + +# Compute wind directions: +uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs +vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs + +windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees; 90 is necessary to shift from trigonometric system to cardinal system and +180 to shift + # from the direction wind is blowing to the direction wind is coming from +rm(uas, vas) +gc() + +# remove bisestile days from windir, to compare it with var, which has no bisestiles: +cat("Removing bisestile days. 
Please wait...\n") +windir365 <- windir[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + + +rm(windir) +gc() + +dir2 <- which(windir365 > 22.5) +dir3 <- which(windir365 > 22.5 + 45) +dir4 <- which(windir365 > 22.5 + 90) +dir5 <- which(windir365 > 22.5 + 135) +dir6 <- which(windir365 > 22.5 + 180) +dir7 <- which(windir365 > 22.5 + 225) +dir8 <- which(windir365 > 22.5 + 270) +dir1 <- which(windir365 > 22.5 + 315) + +windirClass <- array(1, dim(windir365)) # N +windirClass[dir2] <- 2 # NE +windirClass[dir3] <- 3 # E +windirClass[dir4] <- 4 # SE +windirClass[dir5] <- 5 # S +windirClass[dir6] <- 6 # SW +windirClass[dir7] <- 7 # W +windirClass[dir8] <- 8 # NW +windirClass[dir1] <- 1 # N + +rm(windir365) +rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8) +gc() + +# save it once to retreive it later: +save(windirClass, file=paste0(workdir,"/",rean.name,"_windirClass.RData")) + +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +windirFreqInter <- array(NA, c(n.wd, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies + +# save wind direction mean frequency maps: +for(p in periods){ + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from windPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + # mean frequency maps: + windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) + windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1)) + + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + # mean frequency maps for North Pole: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout) + # measure the interannual frequency series of that wind directions (for each grid point): + windirFreqInter[wd,p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1) + + } # close for on wd + +} # close for on p + +save(windirFreqInter, file=paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) + + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a 
particular wind direction): + +# load var anomalies and wind direction array: +load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +#my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean anomaly of a WT for temperature +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.labels[[1]] <- my.brks[[1]] #c(c("-10",my.brks[[1]][-1])[-length(my.brks[[1]])],"10") +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +impact.var <- array(NA, c(n.wd, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each wd on var + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[1,1,,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + impact.var[wd,p,,] <- var.mean.wt + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt[,lon.swapped],-10,10), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.labels[[var.num]], label.dist=.5) + dev.off() + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt,-10,10), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + # + # same maps as above but removing points with frequency < 3%: + # + + windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1)) + + ss <- which(windirFreq.wt < 0.03) + pp <- which(windirFreq.wt >= 0.03) + + windirFreq.wt[ss] <- NA + windirFreq.wt[pp] <- 1 + + var.mean.wt2 <- var.mean.wt * windirFreq.wt + rm(ss,windirFreq.wt) + gc() + + # visualize impact map of the wd on var: + 
my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+      png(filename=my.fileout,width=900,height=600)
+
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+      PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],-10,10), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+      ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.labels[[var.num]], label.dist=.5)
+      dev.off()
+
+      # North Pole map:
+      #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"))
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+      PlotStereoMap(rescale(var.mean.wt2,-10,10), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+      rm(var.mean.wt, var.mean.wt2)
+
+  } # close for on wd
+
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+
+
+
+# choose a year and a month to plot the simulated impact maps with the WDs:
+my.year <- 2014
+my.month <- 9
+
+load(paste0(workdir,"/",rean.name,"_windirFreqInter.RData"))
+load(paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+
+impact.var2 <- InsertDim(impact.var, 2, n.years)
+
+impact.weighted <- impact.var2 * windirFreqInter
+
+impact.total <- apply(impact.weighted, c(2,3,4), sum, na.rm=FALSE)
+
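+# NOTE (editor): a minimal sketch of the reconstruction above: the mean impact of each wind
+# direction is weighted by its interannual frequency and summed over directions to give a
+# "simulated" anomaly. Toy numbers, two directions, one grid point; illustrative names only.
+if(FALSE){
+  impact <- c(N=1.5, NE=-0.5)   # mean anomaly of var during each direction
+  freq   <- c(N=0.30, NE=0.70)  # frequency of each direction in one month
+  sum(impact * freq)            # reconstructed monthly anomaly: 0.1
+}
+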
+
+
+
+
+# Create and save monthly/seasonal/yearly climatology maps of var (note: uses var365 from the anomaly section above):
+
+#p=17 # for the debug
+
+# Map intervals and colors:
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors (breaks of element 2, to match its own intervals)
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+mod.var <- 0
+if(var.num == 1) mod.var <- -273.15 # for temperature, convert from Kelvin to degrees Celsius
+
+for(p in periods){
+  # Select only days of the chosen month/season:
+  varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE]
+  varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE)
+
+  png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+  #PlotEquiMap(varPeriodMean[1,1,,]+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+  PlotEquiMap(varPeriodMean[1,1,,lon.swapped]+mod.var, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F)
+  ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
+
+
+
+
+# Impact of a WT on var (average of var only during the days belonging to a particular WT):
+
+# load lat and lon from the SLP grid:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# only for compatibility with older versions (it should be already loaded):
+lat <- round(MSLP$lat,3)
+lon <- round(MSLP$lon,3)
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load WTs
+
+n.WTs <- 10 # length(unique(WTs))
+wt.codes <- unique(WTs[1,1,,])
+
+# load var anomalies:
+load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
+
+my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors for temperature
+
+for(p in periods){
+  # select var data only inside period p:
+  varPeriod <- var365Anom[,pos.period(1,p),,]
+
+  # select WT data only inside period p:
+  WTsPeriod <- WTs[,pos.period(1,p),,]
+
+  for(wt in 1:n.WTs){
+
+    wt.code <- wt.codes[wt] # i.e. for 10 WTs, their codes are: 1 2 3 4 5 6 7 8 9 18
+
+    # remove from varPeriod the days not belonging to that wt (setting its value to NA):
+    WTsPeriod.wt <- WTsPeriod
+    ss <- which(WTsPeriod == wt.code)
+    pp <- which(WTsPeriod != wt.code)
+
+    WTsPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wt
+    WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+
+    varPeriod.wt <- varPeriod * WTsPeriod.wt
+
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+
+    # visualize impact map of the wt on var:
+    png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_",period.name[p],".png"),width=1000,height=700)
+
+    PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+
+    dev.off() # close the png device
+  }
+}
+
+
+
+
+
+
+
+#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology
+my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+
+#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT
+my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT
+my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT
+my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var
+my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # % Mean anomaly of a WT for wind speed
+my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # % Mean anomaly of a WT for temperature
+my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation
+
+
+my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies of a WT
+
+my.cols <- list()
+#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1)
+#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # 
blue-green-white-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colores for wind speed +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors + +#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors +my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue--yellow-red colors +my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors +#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # blue-yellow-red colors +my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue--yellow-red colors + +my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1)) +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue--yellow-red colors for temper +my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for prec +my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)] +#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1) + +my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue--yellow-red colors + + diff --git a/old/WT_drivers_v7.R~ b/old/WT_drivers_v7.R~ new file mode 100644 index 0000000000000000000000000000000000000000..9fc4160e59de38484ebf96b1b0c42971bdd3bc97 --- /dev/null +++ b/old/WT_drivers_v7.R~ @@ -0,0 +1,517 @@ +########################################################################################## +# Relationship between WTs and a chosen climate variable # +########################################################################################## + +rm(list=ls()) +library(s2dverification) # for the function Load() +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +# directory where to find the WT classification +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT" + +# reanalysis used for daily var data (must be the same as the mslp data): +#var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') 
+var.rean <- list(path = '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +rean.name <- 'ncep' #'erai' + +# any daily variable: +var.name='sfcWind' #'tas' #'sfcWind' #'prlr' +var.name.file='10-m Wind Speed' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the ouput map's title + +periods=1:17 # identify monthly (from 1=Jan to 12=Dec) , seasonal (from 13=winter to 16=Autumn) and yearly=17 values + +LOESS <- TRUE # climatology filter on/off (if off, daily climatology is computed instead and used to measure daily anomalies) + +year.start <- 1985 +year.end <- 2016 + +########################################################################################## +n.periods <- length(periods) +n.years <- year.end - year.start + 1 + +my.brks <- my.cols <- my.labels <- my.unit <- list() +my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD +my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1) +my.unit.freq <- "%" + +if(var.name == 'tas') var.num <- 1 +if(var.name == 'sfcWind') var.num <- 2 +if(var.name == 'prlr') var.num <- 3 + +WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A") +WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)", + "Northerly (N)","Cyclonic (C)","Anticyclonic (A)") +wd.dir <- c("N","NE","E","SE","S","SW","W","NW") +n.wd <- length(wd.dir) + +#year.tot <- year.end - year.start + 1 + +# load WT metadata to get the info on year.start and year.end +#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years")) +#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE) +#load(paste0(workdir,"/txt/ERAint/metadata.RData")) + +# load daily var data for all years: +var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8) +#var <- Load(var.name, NULL, list(var.rean), paste0(2001,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=8) + +var.lat <- var$lat # var lat and lon MUST be the same of WT classification, even if the latter can have NA for certain lat values. +var.lon <- var$lon + +n.lat.var <- length(var.lat) +n.lon.var <- length(var.lon) + +# remove bisestile days from var to have all arrays with the same dimensions: +cat("Removing bisestile days. Please wait...\n") +var365 <- var$obs[,,,1:365,,,drop=FALSE] + +for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February +} + +rm(var) +gc() + +# convert var to daily anomalies: +cat("Calculating anomalies. 
+
+# convert var to daily anomalies:
+cat("Calculating anomalies. Please wait...\n")
+
+var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T)
+
+if(LOESS == TRUE){
+  var365ClimLoess <- var365ClimDaily
+
+  for(i in 1:n.lat.var){
+    for(j in 1:n.lon.var){
+      my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      var365ClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(var365ClimDaily, my.data, my.loess)
+  gc()
+
+  var365Clim <- InsertDim(var365ClimLoess, 3, year.end-year.start+1)
+
+  rm(var365ClimLoess)
+  gc()
+
+} else {
+  var365Clim <- InsertDim(var365ClimDaily, 3, n.years)
+
+  rm(var365ClimDaily)
+  gc()
+}
+
+var365Anom <- var365 - var365Clim
+
+rm(var365Clim)
+gc()
+
+rm(var365)
+gc()
+
+# save var anomalies for retrieving them when necessary:
+save(var365Anom, var.lat, var.lon, n.lat.var, n.lon.var, file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
+
+
+# Compute wind directions:
+uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+
+windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees: atan2(uas,vas) is the angle measured clockwise from north (cardinal instead of trigonometric convention), and the +180 shifts from the direction the wind blows toward to the direction it comes from
+rm(uas, vas)
+gc()
+
+# remove leap days from windir, to compare it with var, which has no leap days:
+cat("Removing leap days. Please wait...\n")
+windir365 <- windir[,,,1:365,,,drop=FALSE]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # shift the March-to-December period back one position, dropping the 29th of February
+}
+
+rm(windir)
+gc()
+
+dir2 <- which(windir365 > 22.5)
+dir3 <- which(windir365 > 22.5 + 45)
+dir4 <- which(windir365 > 22.5 + 90)
+dir5 <- which(windir365 > 22.5 + 135)
+dir6 <- which(windir365 > 22.5 + 180)
+dir7 <- which(windir365 > 22.5 + 225)
+dir8 <- which(windir365 > 22.5 + 270)
+dir1 <- which(windir365 > 22.5 + 315)
+
+windirClass <- array(1, dim(windir365)) # N
+windirClass[dir2] <- 2 # NE
+windirClass[dir3] <- 3 # E
+windirClass[dir4] <- 4 # SE
+windirClass[dir5] <- 5 # S
+windirClass[dir6] <- 6 # SW
+windirClass[dir7] <- 7 # W
+windirClass[dir8] <- 8 # NW
+windirClass[dir1] <- 1 # N
+
+rm(windir365)
+rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8)
+gc()
+
+# save it once to retrieve it later:
+save(windirClass, file=paste0(workdir,"/",rean.name,"_windirClass.RData"))
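+
+# A more compact, equivalent classification (a sketch, assuming windir365 were still in
+# memory and takes values in [0,360]): shift by half a sector, wrap around 360 degrees
+# and cut into the eight 45-degree bins, so e.g. 350 degrees -> 1 (N) and 45 degrees ->
+# 2 (NE), exactly as with the threshold cascade above:
+# windirClass2 <- floor(((windir365 + 22.5) %% 360)/45) + 1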
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+windirFreqInter <- array(NA, c(n.wd, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies
+
+# save wind direction mean frequency maps:
+for(p in periods){
+  # select wind direction data only inside period p:
+  windirPeriod <- windirClass[1,1,,pos.period(1,p),,]
+
+  for(wd in 1:8){
+
+    # remove from windirPeriod the days not belonging to that wd (setting their value to NA):
+    windirPeriod.wt <- windirPeriod
+
+    ss <- which(windirPeriod == wd)
+    pp <- which(windirPeriod != wd)
+
+    windirPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wd
+    windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wd
+    rm(ss,pp)
+    gc()
+
+    # mean frequency maps:
+    windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE)
+    windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1))
+
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F)
+    ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5)
+    dev.off()
+
+    # mean frequency maps for the North Pole:
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout)
+
+    # measure the interannual frequency series of that wind direction (for each grid point):
+    windirFreqInter[wd,p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1)
+
+  } # close for on wd
+
+} # close for on p
+
+save(windirFreqInter, file=paste0(workdir,"/",rean.name,"_windirFreqInter.RData"))
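+
+# Sanity check for the frequencies just saved (a sketch on toy data): every day is
+# assigned to exactly one sector, so the eight frequencies of any grid point and period
+# must add up to 1:
+# toy.days <- sample(1:8, 90, replace=TRUE)        # one 90-day season of toy sectors
+# toy.freq <- sapply(1:8, function(wd) mean(toy.days == wd))
+# stopifnot(abs(sum(toy.freq) - 1) < 1e-12)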
+
+
+# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction):
+
+# load var anomalies and wind direction array:
+load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
+load(paste0(workdir,"/",rean.name,"_windirClass.RData"))
+
+#my.brks[[1]] <- c(-100,seq(-8,8,1),100) # Mean anomaly of a WT for temperature
+my.brks[[1]] <- seq(-10,10,1) # Mean anomaly of a WD for temperature (degrees)
+my.labels[[1]] <- my.brks[[1]] #c(c("-10",my.brks[[1]][-1])[-length(my.brks[[1]])],"10")
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors for temperature
+my.unit[[1]] <- "degrees"
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+impact.var <- array(NA, c(n.wd, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each wd on var
+
+for(p in periods){
+  # select var data only inside period p:
+  varPeriod <- var365Anom[1,1,,pos.period(1,p),,]
+
+  # select wind direction data only inside period p:
+  windirPeriod <- windirClass[1,1,,pos.period(1,p),,]
+
+  for(wd in 1:8){
+
+    # remove from varPeriod the days not belonging to that wd (setting their value to NA):
+    windirPeriod.wt <- windirPeriod
+
+    ss <- which(windirPeriod == wd)
+    pp <- which(windirPeriod != wd)
+
+    windirPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wd
+    windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wd
+    rm(ss,pp)
+    gc()
+
+    varPeriod.wt <- varPeriod * windirPeriod.wt
+
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+
+    impact.var[wd,p,,] <- var.mean.wt
+
+    # visualize impact map of the wd on var:
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+    PlotEquiMap(rescale(var.mean.wt[,lon.swapped],-10,10), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.labels[[var.num]], label.dist=.5)
+    dev.off()
+
+    # North Pole map:
+    #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"))
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(rescale(var.mean.wt,-10,10), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+    #
+    # same maps as above but removing points with frequency < 3%:
+    #
+
+    windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * n.days.in.a.period(p,1))
+
+    ss <- which(windirFreq.wt < 0.03)
+    pp <- which(windirFreq.wt >= 0.03)
+
+    windirFreq.wt[ss] <- NA
+    windirFreq.wt[pp] <- 1
+
+    var.mean.wt2 <- var.mean.wt * windirFreq.wt
+    rm(ss,windirFreq.wt)
+    gc()
+
+    # visualize impact map of the wd on var:
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+    PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],-10,10), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.labels[[var.num]], label.dist=.5)
+    dev.off()
+
+    # North Pole map:
+    #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"))
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(rescale(var.mean.wt2,-10,10), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+    rm(var.mean.wt, var.mean.wt2)
+
+  } # close for on wd
+
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+
+# choose a year and a month to plot the simulated impact maps with the WDs:
+my.year <- 2014
+my.month <- 9
+
+load(paste0(workdir,"/",rean.name,"_windirFreqInter.RData"))
+load(paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+impact.var2 <- InsertDim(impact.var, 2, n.years)
+
+impact.weighted <- impact.var2 * windirFreqInter
+
+impact.total <- apply(impact.weighted, c(2,3,4), sum, na.rm=FALSE)
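+
+# What the three lines above compute, shown on one toy grid point: the reconstructed
+# anomaly of a period is the frequency-weighted sum of the mean anomaly (impact) of
+# each wind direction, i.e. anom.rec = sum over wd of impact[wd] * freq[wd]:
+# imp <- c(1.2, -0.5, 0.3, 0, 0, 0, -2, 0.8)          # toy mean anomaly per sector
+# frq <- c(0.2, 0.1, 0.05, 0.05, 0.1, 0.2, 0.2, 0.1)  # toy sector frequencies (sum to 1)
+# sum(imp * frq)                                      # -> -0.115, the reconstructed anomaly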
+
+
+# Create and save monthly/seasonal/yearly climatology maps of var:
+
+#p=17 # for the debug
+
+# Map intervals and colors:
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors for wind speed
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+mod.var <- 0
+if(var.num == 1) mod.var <- -273.15 # convert temperature from Kelvin to degrees Celsius
+
+for(p in periods){
+  # Select only days of the chosen month/season:
+  varPeriod <- var365[,,,pos.period(1,p),,,drop=FALSE]
+  varPeriodMean <- apply(varPeriod,c(1,2,5,6),mean,na.rm=TRUE)
+
+  png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+  #PlotEquiMap(varPeriodMean[1,1,,]+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+  PlotEquiMap(varPeriodMean[1,1,,lon.swapped]+mod.var, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F)
+  ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
+
+# Impact of a WT on var (average of var only during the days belonging to a particular WT):
+
+# load lat and lon from the SLP grid:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# only for compatibility with older versions (it should be already loaded):
+lat <- round(MSLP$lat,3)
+lon <- round(MSLP$lon,3)
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load the WTs array
+
+n.WTs <- 10 # length(unique(WTs))
+wt.codes <- unique(WTs[1,1,,])
+
+# load var anomalies:
+load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
+
+my.brks[[1]] <- c(-100,seq(-8,8,1),100) # Mean anomaly of a WT for temperature
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors for temperature
+
+for(p in periods){
+  # select var data only inside period p (var365Anom was saved with six dimensions, so the first two must be fixed):
+  varPeriod <- var365Anom[1,1,,pos.period(1,p),,]
+
+  # select WT data only inside period p:
+  WTsPeriod <- WTs[,pos.period(1,p),,]
+
+  for(wt in 1:n.WTs){
+
+    wt.code <- wt.codes[wt] # i.e., for 10 WTs the codes are 1 2 3 4 5 6 7 8 9 18
+
+    # remove from varPeriod the days not belonging to that wt (setting their value to NA):
+    WTsPeriod.wt <- WTsPeriod
+    ss <- which(WTsPeriod == wt.code)
+    pp <- which(WTsPeriod != wt.code)
+
+    WTsPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wt
+    WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+
+    varPeriod.wt <- varPeriod * WTsPeriod.wt
+
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+
+    # visualize impact map of the wt on var:
+    png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_",period.name[p],".png"),width=1000,height=700)
+
+    PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+
+    dev.off()
+
+  }
+}
+
+
+#my.brks[[1]] <- c(0,seq(1.5,9,0.5),15) # Wind speed Climatology
+my.brks[[1]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[1]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+
+#my.brks[[2]] <- c(-10,seq(-4.95,5,0.05),10) # Wind Speed Anomaly associated to a WT
+my.brks[[2]] <- c(seq(0,10,0.1),100) # Mean wind speed in m/s associated to a WT
+my.brks[[3]] <- c(seq(0,20,0.1),100) # Frequency associated to a WT
+my.brks[[4]] <- c(seq(0,30,0.1),100) # % Contribution of a WT to total var
+my.brks[[5]] <- c(seq(-10,-3,1),seq(-2.3,2.3,0.1),seq(3,10,1)) # Mean anomaly of a WT for wind speed
+my.brks[[5]] <- c(-50,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),50) # Mean anomaly of a WT for temperature
+my.brks[[5]] <- c(-100000,seq(-100,0,1),seq(1,100,1),100000) # % Mean anomaly of a WT for precipitation
+
+my.brks[[6]] <- c(seq(0,3,0.1),10) # Standard deviation of the anomalies of a WT
+
+my.cols <- list()
+#my.cols[[index]] <- colorRampPalette(my.palette[[index]])(length(my.brks[[index]])-1)
+#my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-green-white-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("white","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # white-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("white","white","green","yellow","orange","red","darkred"))(length(my.brks[[1]])-1) # white-green-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("blue","white","darkred"))(length(my.brks[[1]])-1) # blue-white-red colors
+#my.cols[[1]] <- colorRampPalette(c("blue","green","yellow","red","brown","violetred4"))(length(my.brks[[1]])-1) # blue-green-yellow-red colors
+#my.cols[[1]] <- colorRampPalette(c("white","cyan2","gold","orange","red","brown","brown4","deeppink4"))(length(my.brks[[1]])-1) # white-cyan-gold-red colors
+my.cols[[1]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[1]])-1) # white-gray-blue-purple colors for wind speed
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+
+#my.cols[[2]] <- colorRampPalette(c("deeppink4","darkblue","blue","white","red","darkred","brown4"))(length(my.brks[[2]])-1) # blue-white-red colors
+my.cols[[2]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[2]])-1) # blue-yellow-red colors
+my.cols[[3]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[3]])-1) # blue-yellow-red colors
+#my.cols[[4]] <- colorRampPalette(c("white","cyan2","blue","deeppink4"))(length(my.brks[[4]])-1) # white-cyan-blue-purple colors
+my.cols[[4]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[4]])-1) # blue-yellow-red colors
+
+my.cols[[5]] <- c(colorRampPalette(c("dodgerblue3","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkorchid4"))(length(my.brks[[5]])-1))
+my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)]
+my.cols[[5]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[5]])-1) # blue-yellow-red colors for temperature
+my.cols[[5]] <- c(colorRampPalette(c("blue","white"))(length(my.brks[[5]])-1), colorRampPalette(c("white","darkred"))(length(my.brks[[5]])-1)) # blue-white-red for precipitation
+my.cols[[5]] <- my.cols[[5]][seq(1, length(my.cols[[5]]),2)]
+#my.cols[[5]] <- colorRampPalette(c("white","blue","purple"))(length(my.brks[[5]])-1)
+
+my.cols[[6]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[6]])-1) # blue-yellow-red colors
+
diff --git a/old/WT_drivers_v8.R b/old/WT_drivers_v8.R
new file mode 100644
index 0000000000000000000000000000000000000000..089adb318e375997d90da466eaa231c23d4c9a37
--- /dev/null
+++ b/old/WT_drivers_v8.R
@@ -0,0 +1,545 @@
+##########################################################################################
+# Relationship between WTs and a chosen climate variable                                 #
+##########################################################################################
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind) # for the function abind()
+library(RColorBrewer) # for the function brewer.pal(), used by the anomaly palettes below (this import was missing)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+# directory where to find the WT classification
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT"
+
+# reanalysis used for daily var data (must be the same as the mslp data):
+var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+#var.rean <- list(path = '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+rean.name <- 'erai' #'ncep' #'erai'
+
+# any daily variable:
+var.name='tas' #'sfcWind' #'prlr'
+#var.name.file='Temperature' #'10-m Wind Speed' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the output map's title
+
+year.start <- 1985
+year.end <- 2014 #2016
+
+periods=1:17 # identify monthly (from 1=Jan to 12=Dec), seasonal (from 13=Winter to 16=Autumn) and yearly=17 values
+
+LOESS <- FALSE # LOESS climatology filter on/off (if off, a 5-day moving window is used to measure the daily anomalies)
+
+##########################################################################################
+n.periods <- length(periods)
+n.years <- year.end - year.start + 1
+
+my.brks <- my.cols <- my.labels <- my.unit <- list()
+my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD
+my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1)
+my.unit.freq <- "%"
+
+if(var.name == 'tas') var.num <- 1
+if(var.name == 'sfcWind') var.num <- 2
+if(var.name == 'prlr') var.num <- 3
+
+WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A")
+WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)","Northerly (N)","Cyclonic (C)","Anticyclonic (A)")
+wd.dir <- c("N","NE","E","SE","S","SW","W","NW")
+n.wd <- length(wd.dir)
+
+# load one day of var data only to detect the lat and lon values:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=1)
+var.lat <- var$lat # var lat and lon MUST be the same as those of the WT classification, even if the latter can have NA for certain lat values.
+var.lon <- var$lon
+n.lat.var <- length(var.lat)
+n.lon.var <- length(var.lon)
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+##########################################################################################
+
+# save var anomalies:
+
+# load WT metadata to get the info on year.start and year.end
+#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years"))
+#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE)
+#load(paste0(workdir,"/txt/ERAint/metadata.RData"))
+
+# load daily var data for all years:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)
+
+# remove leap days from var to have all arrays with the same dimensions:
+cat("Removing leap days. Please wait...\n")
+var365 <- var$obs[,,,1:365,,,drop=FALSE]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # shift the March-to-December period back one position, dropping the 29th of February
+}
+
+rm(var)
+gc()
+
+# convert var to daily anomalies:
+cat("Calculating anomalies. Please wait...\n")
+
+if(LOESS == TRUE){
+  var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T)
+  var365ClimLoess <- var365ClimDaily
+
+  for(i in 1:n.lat.var){
+    for(j in 1:n.lon.var){
+      my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      var365ClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(var365ClimDaily, my.data, my.loess)
+  gc()
+
+  var365Clim <- InsertDim(var365ClimLoess[1,1,,,], 1, n.years) # [year, day, lat, lon], the same shape as the moving-window branch below
+
+  rm(var365ClimLoess)
+  gc()
+
+} else { # apply a 5-day moving window:
+
+  #var365Clim <- InsertDim(var365ClimDaily, 3, n.years)
+  var365Clim <- array(NA,c(365,dim(var365)[5:6]))
+  var365x3 <- abind(var365[,,,364:365,,],var365,var365[,,,1:2,,],along=4) # add the two days before the 1st of January and after the 31st of December
+
+  for(d in 1:365){
+    window <- var365x3[1,1,,2+d+seq(-2,2),,,drop=FALSE] # six subscripts: var365x3 keeps the six dimensions of var365
+    var365Clim[d,,] <- apply(window,c(5,6),mean,na.rm=TRUE)
+  }
+
+  rm(var365x3)
+  gc()
+
+  var365Clim <- InsertDim(var365Clim, 1, n.years)
+
+}
+
+var365Anom <- var365[1,1,,,,] - var365Clim
+
+rm(var365Clim)
+gc()
+
+rm(var365)
+gc()
+
+# save var anomalies for retrieving them when necessary:
+save(var365Anom, file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
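+
+# The 5-day moving-window climatology above, reduced to one grid point (a sketch on toy
+# data): for each calendar day, average that day and its two neighbours across all
+# years, wrapping around the calendar so the 1st of January also uses 30-31 December:
+# toy <- matrix(rnorm(3*365), nrow=3)            # 3 toy years x 365 days
+# toyx3 <- cbind(toy[,364:365], toy, toy[,1:2])  # pad two days on each side (wrap-around)
+# clim <- sapply(1:365, function(d) mean(toyx3[, 2 + d + seq(-2,2)]))
+# anom <- toy - matrix(clim, 3, 365, byrow=TRUE)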
+
+
+if(LOESS == 'test'){
+
+# Compute wind directions:
+uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+
+windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees: atan2(uas,vas) is the angle measured clockwise from north (cardinal instead of trigonometric convention), and the +180 shifts from the direction the wind blows toward to the direction it comes from
+rm(uas, vas)
+gc()
+
+# remove leap days from windir, to compare it with var, which has no leap days:
+cat("Removing leap days. Please wait...\n")
+windir365 <- windir[,,,1:365,,,drop=FALSE]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # shift the March-to-December period back one position, dropping the 29th of February
+}
+
+rm(windir)
+gc()
+
+dir2 <- which(windir365 > 22.5)
+dir3 <- which(windir365 > 22.5 + 45)
+dir4 <- which(windir365 > 22.5 + 90)
+dir5 <- which(windir365 > 22.5 + 135)
+dir6 <- which(windir365 > 22.5 + 180)
+dir7 <- which(windir365 > 22.5 + 225)
+dir8 <- which(windir365 > 22.5 + 270)
+dir1 <- which(windir365 > 22.5 + 315)
+
+windirClass <- array(1, dim(windir365)) # N
+windirClass[dir2] <- 2 # NE
+windirClass[dir3] <- 3 # E
+windirClass[dir4] <- 4 # SE
+windirClass[dir5] <- 5 # S
+windirClass[dir6] <- 6 # SW
+windirClass[dir7] <- 7 # W
+windirClass[dir8] <- 8 # NW
+windirClass[dir1] <- 1 # N
+
+rm(windir365)
+rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8)
+gc()
+
+# save it once to retrieve it later:
+save(windirClass, file=paste0(workdir,"/",rean.name,"_windirClass.RData"))
+
+
+# Plot frequency maps:
+
+load(paste0(workdir,"/",rean.name,"_windirClass.RData"))
+
+windirFreqInter <- array(NA, c(n.wd, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies
+
+# save wind direction mean frequency maps:
+for(p in periods){
+  # select wind direction data only inside period p:
+  windirPeriod <- windirClass[1,1,,pos.period(1,p),,]
+
+  for(wd in 1:8){
+
+    # remove from windirPeriod the days not belonging to that wd (setting their value to NA):
+    windirPeriod.wt <- windirPeriod
+
+    ss <- which(windirPeriod == wd)
+    pp <- which(windirPeriod != wd)
+
+    windirPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wd
+    windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wd
+    rm(ss,pp)
+    gc()
+
+    # mean frequency maps:
+    windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE)
+    windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1))
+
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F)
+    ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5)
+    dev.off()
+
+    # mean frequency maps for the North Pole:
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout)
+
+    # measure the interannual frequency series of that wind direction (for each grid point):
+    windirFreqInter[wd,p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1)
+    #windirPeriod.wt[year,days,,]
+
+    # plot the frequency maps for each year:
+    for(y in year.start:year.end){
+      y2 <- y - year.start + 1
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png")
+      png(filename=my.fileout,width=900,height=600)
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      PlotEquiMap(windirFreqInter[wd,p,y2,,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq,
cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + } # close for on y + + } # close for on wd + +} # close for on p + +save(windirFreqInter, file=paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction): + +# load var anomalies and wind direction array: +load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact +my.unit[[2]] <- "m/s" + + +impact.var <- array(NA, c(n.wd, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each wd on var + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + impact.var[wd,p,,] <- var.mean.wt + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + # + # same maps as above but removing points with frequency < 3%: + # + + windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * 
n.days.in.a.period(p,1))
+
+    ss <- which(windirFreq.wt < 0.03)
+    pp <- which(windirFreq.wt >= 0.03)
+
+    windirFreq.wt[ss] <- NA
+    windirFreq.wt[pp] <- 1
+
+    var.mean.wt2 <- var.mean.wt * windirFreq.wt
+    rm(ss,windirFreq.wt)
+    gc()
+
+    # visualize impact map of the wd on var:
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+    PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+    dev.off()
+
+    # North Pole map:
+    #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"))
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(rescale(var.mean.wt2,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+    rm(var.mean.wt, var.mean.wt2)
+
+  } # close for on wd
+
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+
+# visualize and save the reconstructed monthly/seasonal anomalies with the WDs:
+
+load(paste0(workdir,"/",rean.name,"_windirFreqInter.RData"))
+load(paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the reconstructed anomalies
+
+my.brks[[1]] <- seq(-10,10,1) # Mean anomaly of a WD for temperature (degrees)
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1)
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue-white-red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+for(p in periods){
+  for(y in year.start:year.end){
+    #p <- 10; y <- 2016 # for debugging
+
+    #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,]
+
+    #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    # windirFreqInter[,10,32,66,507]
+    #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645
+
+    #PlotEquiMap(test2[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,]
+    #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+
+    y2 <- y - year.start + 1
+    impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,]
+    impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T)
+
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png")
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+    dev.off()
+
+    for(wd in 1:n.wd){
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png")
+      png(filename=my.fileout,width=900,height=600)
+
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      PlotEquiMap(rescale(impact.weighted[wd,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+      ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+      dev.off()
+
+    }
+
+  } # close for on y
+} # close for on p
+
+save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData"))
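+
+# Plot layout used throughout these scripts: layout(matrix(c(rep(1,10),2), 11, 1)) cuts
+# the device into 11 rows, with the map (panel 1) filling the top 10 and the horizontal
+# colour bar (panel 2) the bottom one. A standalone sketch ("layout_demo.png" is a
+# hypothetical file name):
+# png("layout_demo.png", width=900, height=600)
+# layout(matrix(c(rep(1,10),2), 11, 1, byrow=TRUE))
+# plot(1:10, main="map goes here")               # panel 1
+# image(matrix(1:12, 12, 1), axes=FALSE)         # panel 2: a crude stand-in colour bar
+# dev.off()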
+
+
+
+# Create and save monthly/seasonal/yearly climatology maps of var:
+
+#p=17 # for the debug
+
+# Map intervals and colors:
+my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology
+my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology
+my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology
+
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors
+my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors for wind speed
+
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+mod.var <- 0
+if(var.num == 1) mod.var <- -273.15 # convert temperature from Kelvin to degrees Celsius
+
+for(p in periods){
+  # Select only days of the chosen month/season:
+  varPeriod <- var365[,pos.period(1,p),,,drop=FALSE]
+  varPeriodMean <- apply(varPeriod,c(3,4),mean,na.rm=TRUE)
+
+  png(filename=paste0(workdir,"/",var.name,"_Climatology_",period.name[p],".png"),width=1000,height=700)
+
+  layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+
+  #PlotEquiMap(varPeriodMean+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+  PlotEquiMap(varPeriodMean[,lon.swapped]+mod.var, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F)
+  ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1)
+
+  dev.off()
+}
+
+
+# Impact of a WT on var (average of var only during the days belonging to a particular WT):
+
+# load lat and lon from the SLP grid:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used
+
+# only for compatibility with older versions (it should be already loaded):
+lat <- round(MSLP$lat,3)
+lon <- round(MSLP$lon,3)
+
+# load the daily WT classification for all years and grid points, with the same format as var365Anom:
+load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load the WTs array
+
+n.WTs <- 10 # length(unique(WTs))
+wt.codes <- unique(WTs[1,1,,])
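+
+# The WT codes are whatever integers the classification stored; per the comment further
+# below, with 10 types they are 1:9 plus 18 (Anticyclonic). A sketch of mapping codes
+# back to the names defined at the top of the script (assuming this code/name order):
+# wt.names <- setNames(WTs.type10, c(1:9, 18))
+# wt.names["18"]   # -> "A"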
+
+# load var anomalies:
+load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
+
+my.brks[[1]] <- c(-100,seq(-8,8,1),100) # Mean temperature anomaly
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors for temperature
+my.brks[[2]] <- c(-100,seq(-3,3,0.5),100) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue-white-red colors for wind speed impact
+
+for(p in periods){
+  # select var data only inside period p:
+  varPeriod <- var365Anom[,pos.period(1,p),,]
+
+  # select WT data only inside period p:
+  WTsPeriod <- WTs[,pos.period(1,p),,]
+
+  for(wt in 1:n.WTs){
+
+    wt.code <- wt.codes[wt] # i.e., for 10 WTs the codes are 1 2 3 4 5 6 7 8 9 18
+
+    # remove from varPeriod the days not belonging to that wt (setting their value to NA):
+    WTsPeriod.wt <- WTsPeriod
+    ss <- which(WTsPeriod == wt.code)
+    pp <- which(WTsPeriod != wt.code)
+
+    WTsPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wt
+    WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt
+
+    varPeriod.wt <- varPeriod * WTsPeriod.wt
+
+    var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE)
+
+    # visualize impact map of the wt on var:
+    png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_",period.name[p],".png"),width=1000,height=700)
+
+    PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+
+    dev.off()
+
+  }
+}
+
+
+} # close test
+
+
+
diff --git a/old/WT_drivers_v8.R~ b/old/WT_drivers_v8.R~
new file mode 100644
index 0000000000000000000000000000000000000000..b7af2fdc343355d4e8a216ca9d65fb26c8f00347
--- /dev/null
+++ b/old/WT_drivers_v8.R~
@@ -0,0 +1,551 @@
+##########################################################################################
+# Relationship between WTs and a chosen climate variable                                 #
+##########################################################################################
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind) # for the function abind()
+library(RColorBrewer) # for the function brewer.pal(), used by the anomaly palettes below (this import was missing)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+# directory where to find the WT classification
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/WT"
+
+# reanalysis used for daily var data (must be the same as the mslp data):
+var.rean <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+#var.rean <- list(path = '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+rean.name <- 'erai' #'ncep' #'erai'
+
+# any daily variable:
+var.name='sfcWind' #'tas' #'sfcWind' #'prlr'
+var.name.file='10-m Wind Speed' #'Precipitation' #'Surface_Temperature' # '10-m_Wind_Speed' # variable name used in the output map's title
+
+year.start <- 1985
+year.end <- 2016
+
+periods=1:17 # identify monthly (from 1=Jan to 12=Dec), seasonal (from 13=Winter to 16=Autumn) and yearly=17 values
+
+LOESS <- FALSE # LOESS climatology filter on/off (if off, a 5-day moving window is used to measure the daily anomalies)
+
+##########################################################################################
+n.periods <- length(periods)
+n.years <- year.end - year.start + 1
+
+my.brks <- my.cols <- my.labels <- my.unit <- list()
+my.brks.freq <- c(0,0.05,seq(0.1,0.7,0.1),1) # Frequency of a WT or WD
+my.cols.freq <- colorRampPalette(c('#f7fbff','#deebf7','#c6dbef','#9ecae1','#6baed6','#4292c6','#2171b5','#08519c','#08306b'))(length(my.brks.freq)-1)
+my.unit.freq <- "%"
+
+if(var.name == 'tas') var.num <- 1
+if(var.name == 'sfcWind') var.num <- 2
+if(var.name == 'prlr') var.num <- 3
+
+WTs.type <- c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+WTs.type10<-c("NE","E","SE","S","SW","W","NW","N","C","A")
+WTs.type10.name<-c("Northeasterly (NE)","Easterly (E)","Southeasterly (SE)","Southerly (S)","Southwesterly (SW)","Westerly (W)","Northwesterly (NW)","Northerly (N)","Cyclonic (C)","Anticyclonic (A)")
+wd.dir <- c("N","NE","E","SE","S","SW","W","NW")
+n.wd <- length(wd.dir)
+
+# load one day of var data only to detect the lat and lon values:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax=1, output = 'lonlat', nprocs=1)
+var.lat <- var$lat # var lat and lon MUST be the same as those of the WT classification, even if the latter can have NA for certain lat values.
+var.lon <- var$lon
+n.lat.var <- length(var.lat)
+n.lon.var <- length(var.lon)
+n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees
+lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged
+var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180
+
+##########################################################################################
+
+if(LOESS == 'test'){
+# save var anomalies:
+
+# load WT metadata to get the info on year.start and year.end
+#WTs_files <- list.files(paste0(workdir,"/txt/ERAint/all_years"))
+#WTs_file1 <- read.table(file=paste0(workdir,"/txt/ERAint/all_years/",WTs_files[1]), header=TRUE)
+#load(paste0(workdir,"/txt/ERAint/metadata.RData"))
+
+# load daily var data for all years:
+var <- Load(var.name, NULL, list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)
+
+# remove leap days from var to have all arrays with the same dimensions:
+cat("Removing leap days. Please wait...\n")
+var365 <- var$obs[,,,1:365,,,drop=FALSE]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) var365[,,y2,60:365,,] <- var$obs[,,y2,61:366,,] # shift the March-to-December period back one position, dropping the 29th of February
+}
+
+rm(var)
+gc()
+
+# convert var to daily anomalies:
+cat("Calculating anomalies. Please wait...\n")
+
+if(LOESS == TRUE){
+  var365ClimDaily <- apply(var365, c(1,2,4,5,6), mean, na.rm=T)
+  var365ClimLoess <- var365ClimDaily
+
+  for(i in 1:n.lat.var){
+    for(j in 1:n.lon.var){
+      my.data <- data.frame(ens.mean=var365ClimDaily[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      var365ClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(var365ClimDaily, my.data, my.loess)
+  gc()
+
+  var365Clim <- InsertDim(var365ClimLoess[1,1,,,], 1, n.years) # [year, day, lat, lon], the same shape as the moving-window branch below
+
+  rm(var365ClimLoess)
+  gc()
+
+} else { # apply a 5-day moving window:
+
+  #var365Clim <- InsertDim(var365ClimDaily, 3, n.years)
+  var365Clim <- array(NA,c(365,dim(var365)[5:6]))
+  var365x3 <- abind(var365[,,,364:365,,],var365,var365[,,,1:2,,],along=4) # add the two days before the 1st of January and after the 31st of December
+
+  for(d in 1:365){
+    window <- var365x3[1,1,,2+d+seq(-2,2),,,drop=FALSE] # six subscripts: var365x3 keeps the six dimensions of var365
+    var365Clim[d,,] <- apply(window,c(5,6),mean,na.rm=TRUE)
+  }
+
+  rm(var365x3)
+  gc()
+
+  var365Clim <- InsertDim(var365Clim, 1, n.years)
+
+}
+
+var365Anom <- var365[1,1,,,,] - var365Clim
+
+rm(var365Clim)
+gc()
+
+rm(var365)
+gc()
+
+# save var anomalies for retrieving them when necessary:
+save(var365Anom, file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData"))
+
+
+# Compute wind directions:
+uas <- Load(var = 'uas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+vas <- Load(var = 'vas', exp = NULL, obs = list(var.rean), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax=366, output = 'lonlat', nprocs=8)$obs
+
+windir <- (180/pi)*atan2(uas,vas) + 180 # wind direction in degrees: atan2(uas,vas) is the angle measured clockwise from north (cardinal instead of trigonometric convention), and the +180 shifts from the direction the wind blows toward to the direction it comes from
+rm(uas, vas)
+gc()
+
+# remove leap days from windir, to compare it with var, which has no leap days:
+cat("Removing leap days. Please wait...\n")
+windir365 <- windir[,,,1:365,,,drop=FALSE]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) windir365[,,y2,60:365,,] <- windir[,,y2,61:366,,] # shift the March-to-December period back one position, dropping the 29th of February
+}
+
+rm(windir)
+gc()
+
+dir2 <- which(windir365 > 22.5)
+dir3 <- which(windir365 > 22.5 + 45)
+dir4 <- which(windir365 > 22.5 + 90)
+dir5 <- which(windir365 > 22.5 + 135)
+dir6 <- which(windir365 > 22.5 + 180)
+dir7 <- which(windir365 > 22.5 + 225)
+dir8 <- which(windir365 > 22.5 + 270)
+dir1 <- which(windir365 > 22.5 + 315)
+
+windirClass <- array(1, dim(windir365)) # N
+windirClass[dir2] <- 2 # NE
+windirClass[dir3] <- 3 # E
+windirClass[dir4] <- 4 # SE
+windirClass[dir5] <- 5 # S
+windirClass[dir6] <- 6 # SW
+windirClass[dir7] <- 7 # W
+windirClass[dir8] <- 8 # NW
+windirClass[dir1] <- 1 # N
+
+rm(windir365)
+rm(dir1,dir2,dir3,dir4,dir5,dir6,dir7,dir8)
+gc()
+
+# save it once to retrieve it later:
+save(windirClass, file=paste0(workdir,"/",rean.name,"_windirClass.RData"))
+
+
+# Plot frequency maps:
+
+load(paste0(workdir,"/",rean.name,"_windirClass.RData"))
+
+windirFreqInter <- array(NA, c(n.wd, length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the interannual frequencies
+
+# save wind direction mean frequency maps:
+for(p in periods){
+  # select wind direction data only inside period p:
+  windirPeriod <- windirClass[1,1,,pos.period(1,p),,]
+
+  for(wd in 1:8){
+
+    # remove from windirPeriod the days not belonging to that wd (setting their value to NA):
+    windirPeriod.wt <- windirPeriod
+
+    ss <- which(windirPeriod == wd)
+    pp <- which(windirPeriod != wd)
+
+    windirPeriod.wt[ss] <- 1  # set to 1 the days belonging to that wd
+    windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wd
+    rm(ss,pp)
+    gc()
+
+    # mean frequency maps:
+    windirDays <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE)
+    windirFreq <- windirDays / (n.years * n.days.in.a.period(p,1))
+
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    PlotEquiMap(windirFreq[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq, cols=my.cols.freq, colNA="gray", drawleg=F)
+    ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5)
+    dev.off()
+
+    # mean frequency maps for the North Pole:
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_polar_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(windirFreq, var.lon, var.lat, latlims = c(60,90), brks=my.brks.freq, cols=my.cols.freq, subsampleg=1, units=my.unit.freq, colNA="gray", fileout=my.fileout)
+
+    # measure the interannual frequency series of that wind direction (for each grid point):
+    windirFreqInter[wd,p,,,] <- apply(windirPeriod.wt,c(1,3,4), sum, na.rm=TRUE) / n.days.in.a.period(p,1)
+    #windirPeriod.wt[year,days,,]
+
+    # plot the frequency maps for each year:
+    for(y in year.start:year.end){
+      y2 <- y - year.start + 1
+      my.fileout <- paste0(workdir,"/windir/",rean.name,"_frequency_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png")
+      png(filename=my.fileout,width=900,height=600)
+      layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+      PlotEquiMap(windirFreqInter[wd,p,y2,,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks.freq,
cols=my.cols.freq, colNA="gray", drawleg=F) + ColorBar2(my.brks.freq, cols=my.cols.freq, vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks.freq), my.labels=100*my.brks.freq, label.dist=.5) + dev.off() + + } # close for on y + + } # close for on wd + +} # close for on p + +save(windirFreqInter, file=paste0(workdir,"/",rean.name,"_windirFreqInter.RData")) + + + + + + + +# save impact map of wind direction on var (average of var only during the days belonging to a particular wind direction): + +# load var anomalies and wind direction array: +load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) +load(paste0(workdir,"/",rean.name,"_windirClass.RData")) + +my.brks[[1]] <- seq(-10,10,1) # % Mean anomaly of a WT for temperature +my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.unit[[1]] <- "degrees" + +my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact +my.unit[[2]] <- "m/s" + + +impact.var <- array(NA, c(n.wd, length(periods), n.lat.var, n.lon.var)) # array where to save the impact of each wd on var + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select wind direction data only inside period p: + windirPeriod <- windirClass[1,1,,pos.period(1,p),,] + + for(wd in 1:8){ + + # remove from varPeriod the days not belonging to that wd (setting its value to NA): + windirPeriod.wt <- windirPeriod + + ss <- which(windirPeriod == wd) + pp <- which(windirPeriod != wd) + + windirPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + windirPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + rm(ss,pp) + gc() + + varPeriod.wt <- varPeriod * windirPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + impact.var[wd,p,,] <- var.mean.wt + + # visualize impact map of the wd on var: + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(rescale(var.mean.wt[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + # North Pole map: + #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")) + my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png") + PlotStereoMap(rescale(var.mean.wt,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout) + + # + # same maps as above but removing points with frequency < 3%: + # + + windirFreq.wt <- apply(windirPeriod.wt, c(3,4), sum, na.rm=TRUE) / (n.years * 
n.days.in.a.period(p,1))
+
+    ss <- which(windirFreq.wt < 0.03)
+    pp <- which(windirFreq.wt >= 0.03)
+
+    windirFreq.wt[ss] <- NA
+    windirFreq.wt[pp] <- 1
+
+    var.mean.wt2 <- var.mean.wt * windirFreq.wt
+    rm(ss,windirFreq.wt)
+    gc()
+
+    # visualize impact map of the wd on var:
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+
+    png(filename=my.fileout,width=900,height=600)
+
+    layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE))
+    #PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray")
+    PlotEquiMap(rescale(var.mean.wt2[,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5)
+    dev.off()
+
+    # North Pole map:
+    #png(filename=paste0(workdir,"/windir/impact_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png"))
+    my.fileout <- paste0(workdir,"/windir/",rean.name,"_impact_min3percent_polar_",var.name,"_",period.name[p],"_direction_",wd.dir[wd],".png")
+    PlotStereoMap(rescale(var.mean.wt2,min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon, var.lat, latlims = c(60,90), brks=my.brks[[var.num]], cols=my.cols[[var.num]], subsampleg=1, units=my.unit[[var.num]], colNA="gray", fileout=my.fileout)
+
+    rm(var.mean.wt, var.mean.wt2)
+
+  } # close for on wd
+
+} # close for on p
+
+save(impact.var, file=paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+
+} # close test
+
+
+# visualize and save the reconstructed monthly/seasonal anomalies with the WDs:
+
+load(paste0(workdir,"/",rean.name,"_windirFreqInter.RData"))
+load(paste0(workdir,"/",rean.name,"_",var.name,"_impact.RData"))
+
+impact.total <- array(NA, c(length(periods), n.years, n.lat.var, n.lon.var)) # array where to store the reconstructed anomalies
+
+my.brks[[1]] <- seq(-10,10,1) # Mean anomaly of a WD for temperature (degrees)
+my.cols[[1]] <- colorRampPalette(as.character(read.csv("~/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1)
+my.unit[[1]] <- "degrees"
+
+my.brks[[2]] <- seq(-3,3,0.5) # Mean wind speed anomaly in m/s
+my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue-white-red colors for wind speed impact
+my.unit[[2]] <- "m/s"
+
+for(p in periods){
+  for(y in year.start:year.end){
+    #p <- 10; y <- 2016 # for debugging
+
+    #test <- impact.var[1,10,,]*windirFreqInter[1,10,32,,]+impact.var[2,10,,]*windirFreqInter[2,10,32,,]+impact.var[3,10,,]*windirFreqInter[3,10,32,,]+impact.var[4,10,,]*windirFreqInter[4,10,32,,]+impact.var[5,10,,]*windirFreqInter[5,10,32,,]+impact.var[6,10,,]*windirFreqInter[6,10,32,,]+impact.var[7,10,,]*windirFreqInter[7,10,32,,]+impact.var[8,10,,]*windirFreqInter[8,10,32,,]
+
+    #PlotEquiMap(test[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
+    # windirFreqInter[,10,32,66,507]
+    #test2 <- impact.var[3,10,,]*0.4516 # + impact.var[2,10,,]*0.225 + impact.var[8,10,,]*0.0967 + impact.var[4,10,,]*0.06451 + impact.var[6,10,,]*0.0645 + impact.var[1,10,,]*0.03225 + impact.var[5,10,,]*0.0645
+
+    #PlotEquiMap(test2[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F)
brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + #test3 <- impact.var[3,10,,] #*windirFreqInter[3,10,32,,] + #PlotEquiMap(test3[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + #test4 <- impact.var[3,10,,] * windirFreqInter[3,10,32,,] + #PlotEquiMap(test4[,lon.swapped], var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + + y2 <- y - year.start + 1 + impact.weighted <- impact.var[,p,,] * windirFreqInter[,p,y2,,] + impact.total[p,y2,,] <- apply(impact.weighted, c(2,3), sum, na.rm=T) + + my.fileout <- paste0(workdir,"/windir/",rean.name,"_reconstructed_",var.name,"_anomalies_",period.name[p],"_year_",y,".png") + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(rescale(impact.total[p,y2,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + for(wd in 1:n.wd){ + my.fileout <- paste0(workdir,"/windir/",rean.name,"_weighted_impact_",var.name,"_",period.name[p],"_year_",y,"_direction_",wd.dir[wd],".png") + png(filename=my.fileout,width=900,height=600) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + PlotEquiMap(rescale(impact.weighted[wd,,lon.swapped],min(my.brks[[var.num]]),max(my.brks[[var.num]])), var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], colNA="gray", drawleg=F) + ColorBar2(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, cex=1, my.ticks=-0.5 + 1:length(my.brks[[var.num]]), my.labels=my.brks[[var.num]], label.dist=.5) + dev.off() + + } + + } # close for on y +} # close for on p + +save(impact.total, file=paste0(workdir,"/",rean.name,"_",var.name,"_reconstructed_anomalies.RData")) + + + + + + +if(LOESS == 'test'){ + + +# Create and save monthly/seasonal/yearly climatology maps of var: + +#p=17 # for the debug + +# Map intervals and colors: +my.brks[[1]] <- c(-50,-40,-30,seq(-20,30,0.05),40,50) # Surface Temperature Climatology +my.brks[[2]] <- c(0,seq(0.05,10,0.05),15) # Precipitation Climatology +my.brks[[3]] <- c(0,seq(1.05,10,0.05),15) # Wind speed Climatology + +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue-yellow-red colors +my.cols[[2]] <- colorRampPalette(c("white","gray","cyan2","blue","deeppink4"))(length(my.brks[[2]])-1) # white-gray-blue-purple colors for precipitation + +n.lonr <- n.lon.var - ceiling(length(var.lon[var.lon>0 & var.lon < 180])) # count the num of lon values between 180 and 360 degrees +lon.swapped <- c((n.lonr+1):n.lon.var,1:n.lonr) # lon is always positive but rearranged +var.lon2 <- c(var.lon[c((n.lonr+1):n.lon.var)]-360,var.lon[1:n.lonr]) # to have lon values from -180 to 180 + +mod.var <- 0 +if(var.num == 1) mod.var <- -273.5 # for temperature, add mod.var to convert the climatology from K to degrees C: + +for(p in periods){ + # Select only days of the chosen month/season: + varPeriod <- var365[,pos.period(1,p),,,drop=FALSE] + varPeriodMean <- apply(varPeriod,c(3,4),mean,na.rm=TRUE) + 
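+
+  # Aside: a self-contained toy check of the lon.swapped / var.lon2 re-indexing
+  # defined above, which rotates a [0,360) longitude axis into [-180,180) for
+  # plotting; the 8-point toy axis is invented here, and the block is wrapped in
+  # if(FALSE) so the script's behaviour is unchanged:
+  if(FALSE){
+    toy.lon <- c(0,45,90,135,180,225,270,315)                                      # toy [0,360) axis
+    toy.nr <- length(toy.lon) - ceiling(length(toy.lon[toy.lon>0 & toy.lon<180]))  # same formula as n.lonr -> 5
+    toy.swap <- c((toy.nr+1):length(toy.lon), 1:toy.nr)                            # 6 7 8 1 2 3 4 5
+    toy.lon2 <- c(toy.lon[(toy.nr+1):length(toy.lon)]-360, toy.lon[1:toy.nr])
+    print(toy.lon2)   # -135 -90 -45 0 45 90 135 180: monotone and centred on 0
+  }
+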
png(filename=paste0(workdir,"/",var.name.file,"_Climatology_",period.name[p],".png"),width=1000,height=700) + + layout(matrix(c(rep(1,10),2), 11, 1, byrow = TRUE)) + + #PlotEquiMap(varPeriodMean+mod.var, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + PlotEquiMap(varPeriodMean[,lon.swapped]+mod.var, var.lon2, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F) + ColorBar(my.brks[[var.num]], cols=my.cols[[var.num]], vert=FALSE, subsampleg=20, cex=1) + + dev.off() +} + + + + + + + + + + + + + +# Impact of a WT on var (average of var only during the days belonging to a particular WT): + +# load lat and lon from SLP grid: +load(paste0(workdir,"/txt/",mslp.rean.name,"/metadata.RData")) # load lat.used and lon.used + +# only for compatibility with older versions (it should be already loaded): +lat <- round(MSLP$lat,3) +lon <- round(MSLP$lon,3) + +# load daily WT classification for all years and grid points and with the same format as var365Anom: +load(paste0(workdir,"/txt/",mslp.rean.name,"/WTs.RData")) # load the WTs array + +n.WTs <- 10 # length(unique(WTs)) +wt.codes <- unique(WTs[1,1,,]) + +# load var anomalies: +load(file=paste0(workdir,"/",rean.name,"_",var.name,"365Anom.RData")) + +my.brks[[1]] <- c(-100,seq(-8,8,1),100) # % Mean temperature anomaly +my.cols[[1]] <- colorRampPalette(as.character(read.csv("/scripts/palettes/rgbhex.csv",header=F)[,1]))(length(my.brks[[1]])-1) # blue--yellow-red colors for temperature +my.brks[[2]] <- c(-100,seq(-3,3,0.5),100) # Mean wind speed anomaly in m/s +my.cols[[2]] <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks[[2]])-1) # blue--white--red colors for wind speed impact + +for(p in periods){ + # select var data only inside period p: + varPeriod <- var365Anom[,pos.period(1,p),,] + + # select WT data only inside period p: + WTsPeriod <- WTs[,pos.period(1,p),,] + + for(wt in 1:n.WTs){ + + wt.code <- wt.codes[wt] # i.e: for 10 WTs, their codes are: 
1 2 3 4 5 6 7 8 9 18 + + # remove from varPeriod the days not belonging to that wt (setting its value to NA): + WTsPeriod.wt <- WTsPeriod + ss <- which(WTsPeriod == wt.code) + pp <- which(WTsPeriod != wt.code) + + WTsPeriod.wt[ss] <- 1 # set to 1 the days belonging to that wt + WTsPeriod.wt[pp] <- NA # set to NA the days not belonging to that wt + + varPeriod.wt <- varPeriod * WTsPeriod.wt + + var.mean.wt <- apply(varPeriod.wt, c(3,4), mean, na.rm=TRUE) + + # visualize impact map of the wt on var: + png(filename=paste0(workdir,"/",var.name.file,"/impact/",var.name,"_",period.name[p],".png"),width=1000,height=700) + + PlotEquiMap(var.mean.wt, var.lon, var.lat, filled.continents = FALSE, brks=my.brks[[var.num]], cols=my.cols[[var.num]], drawleg=F, colNA="gray") + + + + } +} + + + + + + + +} + + + + + + + + diff --git a/old/WT_v1.R b/old/WT_v1.R new file mode 100644 index 0000000000000000000000000000000000000000..7079088c665570504adf4189c31034fbd8dd941a --- /dev/null +++ b/old/WT_v1.R @@ -0,0 +1,265 @@ +######################################################################################### +# Lamb's WTs with ERA-Interim # +######################################################################################### + +library(s2dverification) # for the function Load() +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path + +workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files +subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray) + +mslp.rean='ERAintDailyHighRes' #'ERAintDailyLowRes' # daily reanalysis dataset used for MSLP data +year.start=1985 # starting year of the MSLP daily data (from the 1st of January) +year.end=2014 # ending year of the MSLP daily data (up to the 31st of December) + +low.res.size=5 # odd numb. of MSLP nearby grid points to compute the psl at each of the 16 points (both for lon and lat: actual n. of pts used is its square) + +########################################################################################## + +WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point + +# Load 1 day of MSLP data from the reanalysis chosen, just to detect the number of latitude and longitude points: +MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) + +n.lat <- length(MSLP$lat) # number of latitude values +n.lon <- length(MSLP$lon) # number of longitude values + +lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not. + +# for each grid point of the reanalysis, compute the Lamb WT classification with Lamb grid centered on that point. +# we exclude only central points > +80 degrees lat and < -80 deg. because the Lamb grid needs 2 points at 10 deg. north/south of the central point. +n.lat.unused.poles <- 8 # number of unused latitude values near each of the two poles (must exclude last 10 degrees N/S, so it depends on the spatial res. 
of the reanalysis) +n.lat.unused.equat <- 8 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S per side, so it depends on the spat.res. of the reanalysis) + +pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note that the point at lat=0, if present, is always included +pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note that the point at lat=0, if present, is always excluded +pos.lat.unused.eq.north <- (pos.lat.eq.north-n.lat.unused.equat+1):pos.lat.eq.north +pos.lat.unused.eq.south <- pos.lat.eq.south:(pos.lat.eq.south+n.lat.unused.equat-1) + +lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat, pos.lat.unused.eq.north, pos.lat.unused.eq.south)] # latitude values used as central points +n.lat.used <- length(lat.used) + +lon.used <- MSLP$lon # longitude values used as central points +n.lon.used <- length(lon.used) +n.grid.points <- length(lat.used)*length(lon.used) + +n.days.tot <- n.days.in.a.yearly.period(year.start,year.end) +#MSLP.year <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', configfile = Load.path) +#gc() + +## sub <- split.array.big(dimensions=c(n.days, n.lat.used, n.lon.used), along=3) +## for(y in year.start:year.end){ +## cat(paste0("Splitting year: ",y)) + +## MSLP.year <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', configfile = Load.path) + +## for(s in 1:sub$n.sub){ +## MSLP.year.sub <- MSLP.year$obs[1,1,1,,,sub$int[[s]]] +## save(MSLP.year.sub, file=paste0(subdatadir,"/","MSLP_",mslp.rean,"_year_",y,"_sub_",s,".RData"), compress=FALSE) +## } +## } +## ### lonmin=lon.used[head(sub$int[[s]],1)], lonmax=lon.used[tail(sub$int[[s]],1)]) + +# load all MSLP data one year at a time: +for(y in year.start:year.end){ + print(paste0("Year: ",y)) + n.days <- n.days.in.a.year(y) + MSLP.year <- Load('psl', NULL, mslp.rean, paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', configfile = Load.path) + gc() + + # loop over the lat/lon of the central point of the Lamb classification: + for(latc in lat.used){ + #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug + + # first, compute the latitude coefficients: + SF1<-1/cos(latc*pi/180) + ZS1<-1/(2*cos(latc*pi/180)^2) + ZW1<-sin(latc*pi/180)/sin((latc-5)*pi/180) + ZW2<-sin(latc*pi/180)/sin((latc+5)*pi/180) + + for(lonc in lon.used){ + pos.latc <- which(lat.used==latc) + pos.lonc <- which(lon.used==lonc) + cat(paste0("Grid point saved: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r") + + # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point: + lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid + lat[1] <- lat[2] <- latc + 10 + lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + 5 + lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc + lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - 5 + lat[15] <- lat[16] <- latc - 10 + + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 + + # longitude correction for reanalysis with positive-only longitude (to stay always positive): + if(lon.pos){ + if(lonc - 5 < 0){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc - 15 < 0 && lonc 
- 5 >= 0){ + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc + 5 >= 360){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + if(lonc + 15 >= 360 && lonc + 5 < 360){ + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + } + + # longitude correction for reanalysis with [-180, +180] longitude (to stay always in the same interval): + # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!! + if(!lon.pos){ + if(lonc - 5 < -180){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc + 5 >= 180){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + } + + # lat and lon position of the 16 Lamb grid points inside the MSLP reanalysis: + pos.lat <- pos.lon <- c() + for(p in 1:16){ + pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon) + pos.lat[p] <- pos[1] + pos.lon[p] <- pos[2] + } + + #WTs <- data.frame() + + # mean psl at each of the 16 grid points: + psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid + for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]] + + for(p in 1:16) { + pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) + pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius) + MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res] + psl[,p] <- apply(MSLP.low.res, 1, mean) # daily mean of the psl over the low-res box + } + + # compute the geostrophic indices (don't round them to the 3rd decimal as done by Trigo and Martin-Vide, not to introduce an error that changes 3-4% of WTs!!!): + SF<-WF<-F<-D<-D2<-c() + + WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5]) + SF <- SF1*(0.25*(psl[,5]+2*psl[,9]+psl[,13]) - 0.25*(psl[,4]+2*psl[,8]+psl[,12])) + F <- (SF*SF + WF*WF)^0.5 + + if(latc < 0){ + WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense! + SF <- -SF + } + + # D is the flow direction; since the angle returned by atan is in radians, it must be converted to degrees by multiplying it by (180/pi) + # D is the angle between the wind direction and its projection in the north-south direction (SF) + D <- (180/pi) * atan(WF/SF) + + # transform the initial angle D into an always-positive angle D2, measured clockwise from North: + Q1 <- which(WF>0 & SF>0) + Q2 <- which(WF>0 & SF<=0) + Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to 3 and which to 4. + Q4 <- which(WF<0 & SF>0) # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left. 
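+
+      # Aside: the atan() call above plus the quadrant correction below can be
+      # collapsed into a single atan2() call (a sketch; D2.alt is a name invented
+      # here). atan2 also covers the axis cases: with the branches below, a day
+      # with WF>0 and SF==0 appears to end up with D2 = 90+360 = 450.
+      if(FALSE){
+        D2.alt <- ((180/pi)*atan2(WF,SF) + 180) %% 360   # clockwise from North, in [0,360)
+      }
+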
+ + # the wind direction is usually different from the angle D where the wind is blowing to, so a correction has to be made: + D2 <- D + D2[Q1] <- D[Q1] + 180 + D2[Q2] <- D[Q2] + 360 + D2[Q4] <- D[Q4] + 180 + + # identify the directional WTs: + NE <- which(D2>=22.5 & D2<67.5) + E <- which(D2>=67.5 & D2<112.5) + SE <- which(D2>=112.5 & D2<157.5) + S <- which(D2>=157.5 & D2<202.5) + SW <- which(D2>=202.5 & D2<247.5) + W <- which(D2>=247.5 & D2<292.5) + NW <- which(D2>=292.5 & D2<337.5) + N <- which(D2>=337.5 | D2<22.5) + + # add a column on the right with the directional WT associated with the angle: + D2.dir<-rep(NA,length(D2)) + D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N" + + # compute the vorticity components: + ZS<-ZW<-Z<-c() + ZS <- ZS1*(0.25*(psl[,6]+2*psl[,10]+psl[,14]) - 0.25*(psl[,5]+2*psl[,9]+psl[,13]) - 0.25*(psl[,4]+2*psl[,8]+psl[,12]) + 0.25*(psl[,3]+2*psl[,7]+psl[,11])) + ZW <- ZW1*(0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2*(0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2])) + Z <- ZS + ZW + + # Pure and hybrid WTs + cyc.pure <- which(Z>2*F) + anticyc.pure <- which(Z<(-2*F)) + hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F) + hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F) + indeter <- which(F<6 & abs(Z)<6) # indeterminate weather type (U); the choice of 6 depends on grid size and should be changed if the grid res. is higher! + + # create a data frame with all the info about the WT classification: + + #years.period <- months.period <- days.period <- c() + #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y))) + #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y)) + #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y)) + years.period <- rep(y, n.days) + months.period <- seq.months.in.a.year(y) + days.period <- seq.days.in.a.year(y) + WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE) + + WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir) + + # column identifying which days are of hybrid type and which of directional type: + WT$Hyb <- rep("Directional",length(D2)) + WT$Hyb[cyc.pure] <- "Pure_C" + WT$Hyb[anticyc.pure] <- "Pure_A" + WT$Hyb[hybrid.cyc] <- "Hybrid_C" + WT$Hyb[hybrid.anticyc] <- "Hybrid_A" + + # column with the list of the 26 official WTs: + WT$WT <- WT$WT.dir + WT$WT[cyc.pure] <- "C" + WT$WT[anticyc.pure] <- "A" + WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="") + WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="") + #WT$WT[indeter] <- "U" # comment this line not to include the Unclassified U WT + + WT$WT.num <- match(WT$WT,WTs.type)# add a column converting the 26 weather types to numbers from 1 to 26 + + # column with the list of the 10 WTs obtained by joining the eight pure directional WTs to C and A; + # each hybrid WT is merged into its corresponding directional type (e.g. C.SW is merged into SW, etc.) 
+ WT$WT10 <- WT$WT.dir + WT$WT10[cyc.pure] <- "C" + WT$WT10[anticyc.pure] <- "A" + WT$WT10.num <- match(WT$WT10,WTs.type) # convert the 10 weather types to numbers from 1 to 9, plus type A which maps to number 18 + + # add the other geostrophic indices: + WT$SF=round(SF,5) + WT$WF=round(WF,5) + WT$F=round(F,5) + WT$ZS=round(ZS,5) + WT$ZW=round(ZW,5) + WT$Z=round(Z,5) + + #WTs<-rbind(WTs,WT) + + # save the full data table as a .txt to exchange it with other people: + write.table(WT,file=paste0(workdir,"/","WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + # save only the classification with 10 WTs, to have a smaller (binary) file: + #WTs<-cbind(WT$Year, WT$Month, WT$Day, WT$WT10.num) + WTs <- WT$WT10.num + save(WTs, file=paste0(workdir,"/","10WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".RData"), compress=FALSE) + + rm(WT,SF,WF,F,D,ZS,ZW,Z) + + } # close for on lonc + } # close for on latc + +} # close for on y
diff --git a/old/WT_v2.R b/old/WT_v2.R new file mode 100644 index 0000000000000000000000000000000000000000..6db1aa2147aa0d3cdf4062c3641eafb78ab167ac --- /dev/null +++ b/old/WT_v2.R @@ -0,0 +1,310 @@ +######################################################################################### +# Lamb's WTs with ERA-Interim # +######################################################################################### +# run it from the bash with: +# +# Rscript WT_v2.R 1984 +# +# where 1984 is the year you want to classify the WTs; +# in this way you can run up to 8 jobs at the same time, each one producing its output files! +# You can also run it for a sequence of years with the syntax: +# +# Rscript WT_v2.R 1980 2014 +# +# and it will compute each year from 1980 to 2014, each one after finishing the previous one. +# If you want to run many years in parallel with just 1 command, run from the bash: +# +# for y in {1980..2014}; do Rscript WT_v2.R $y & done +# +# but it'd need 24 processors! In practice, better to run only 4-8 years at a time + + +library(s2dverification) # for the function Load() +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path + +workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files +subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray) + +mslp.rean='ERAintDailyHighRes' #'ERAintDailyLowRes' # daily reanalysis dataset used for MSLP data + +year.start=1980 # starting year of the MSLP daily data (from the 1st of January) +year.end=1980 # ending year of the MSLP daily data (up to the 31st of December) + +low.res.size=5 # odd numb. of MSLP nearby grid points to compute the psl at each of the 16 points (both for lon and lat: actual n. 
of pts used is its square) + +########################################################################################## + +args <- commandArgs(TRUE) + +if(length(args) == 1) year.start <- year.end <- as.integer(args[1]) +if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])} + +WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point + +# Load 1 day of MSLP data from the reanalysis chosen, just to detect the number of latitude and longitude points: +MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) + +n.lat <- length(MSLP$lat) # number of latitude values +n.lon <- length(MSLP$lon) # number of longitude values + +lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +# for each grid point of the reanalysis, compute the Lamb WT classification with Lamb grid centered on that point. +# we exclude only central points > +77 degrees lat and < -77 deg. because the Lamb grid needs 12.5 deg. north/south of the central point. +n.lat.unused.poles <- 20 # number of unused latitude values near each of the two poles (must exclude last 10 degrees N/S, so it depends on the spatial res. of the reanalysis) +#n.lat.unused.equat <- 20 # number of unused latitude values near each side of the equator (must exclude 10 degrees N/S for side, so it depends on the sp.res. of the reanalysis) + +#pos.lat.eq.north <- tail(which(MSLP$lat >= 0),1) # note than the eventual point at lat=0 is always included +#pos.lat.eq.south <- head(which(MSLP$lat < 0),1) # note than the eventual point at lat=0 is always excluded +#pos.lat.unused.eq.north <- (pos.lat.eq.north-n.lat.unused.poles+1):pos.lat.eq.north +#pos.lat.unused.eq.south <- pos.lat.eq.south:(pos.lat.eq.south+n.lat.unused.equat-1) + +#lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat, pos.lat.unused.eq.north, pos.lat.unused.eq.south)] # latitude values used as central points +lat.used <- round(MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)],3) # latitude values used as central points rounded to the third decimal +n.lat.used <- length(lat.used) + +lon.used <- round(MSLP$lon,3) # longitude values used as central points rounded to the third decimal (precision: ~100 m) +n.lon.used <- length(lon.used) + +n.grid.points <- length(lat.used)*length(lon.used) + +#n.days.tot <- n.days.in.a.yearly.period(year.start,year.end) +#MSLP.year <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', configfile = Load.path) +#gc() + +## sub <- split.array.big(dimensions=c(n.days, n.lat.used, n.lon.used), along=3) +## for(y in year.start:year.end){ +## cat(paste0("Splitting year: ",y)) + +## MSLP.year <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', configfile = Load.path) + +## for(s in 1:sub$n.sub){ +## MSLP.year.sub <- MSLP.year$obs[1,1,1,,,sub$int[[s]]] +## save(MSLP.year.sub, file=paste0(subdatadir,"/","MSLP_",mslp.rean,"_year_",y,"_sub_",s,".RData"), compress=FALSE) +## } +## } +## ### lonmin=lon.used[head(sub$int[[s]],1)], 
lonmax=lon.used[tail(sub$int[[s]],1)]) + +# load all MSLP data one year at a time: +for(y in year.start:year.end){ + print(paste0("Year: ",y)) + n.days <- n.days.in.a.year(y) + MSLP.year <- Load('psl', NULL, mslp.rean, paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', configfile = Load.path) + gc() + + # create two subdirs where to put the yearly output (there are too many files to put them in only 1 dir) + if(!dir.exists(file.path(workdir,paste0(y,"_Rdata")))) dir.create(file.path(workdir,paste0(y,"_Rdata"))) + + if(!dir.exists(file.path(workdir,paste0(y,"_txt")))) dir.create(file.path(workdir,paste0(y,"_txt"))) + + # loop over the lat/lon of the central point of the Lamb classification: + for(latc in lat.used){ + #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug + + # first, compute the latitude coefficients: + SF1<-1/cos(latc*pi/180) + ZS1<-1/(2*cos(latc*pi/180)^2) + ZW1<-sin(latc*pi/180)/sin((latc-5)*pi/180) + ZW2<-sin(latc*pi/180)/sin((latc+5)*pi/180) + + for(lonc in lon.used){ + pos.latc <- which(lat.used==latc) + pos.lonc <- which(lon.used==lonc) + cat(paste0("Grid point saved: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r") + + # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point: + lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid + lat[1] <- lat[2] <- latc + 10 + lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + 5 + lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc + lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - 5 + lat[15] <- lat[16] <- latc - 10 + + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 + + # longitude correction for reanalysis with positive-only longitude (to stay always positive): + if(lon.pos){ + if(lonc - 5 < 0){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc - 15 < 0 && lonc - 5 >= 0){ + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc + 5 >= 360){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + if(lonc + 15 >= 360 && lonc + 5 < 360){ + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + } + + # longitude correction for reanalysis with [-180, +180] longitude (to stay always in the same interval): + # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!! 
+ if(!lon.pos){ + if(lonc - 5 < -180){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc + 5 >= 180){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + } + + # lat and lon position of the 16 Lamb grid points inside the MSLP reanalysis: + pos.lat <- pos.lon <- c() + for(p in 1:16){ + pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon) + pos.lat[p] <- pos[1] + pos.lon[p] <- pos[2] + } + + #WTs <- data.frame() + + # mean psl at each of the 16 grid points: + psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid + #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]] + + for(p in 1:16) { + + pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius) + + if(head(pos.lon.low.res,1) <= 0){ + ss <- which(pos.lon.low.res <= 0) + pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss]) + } + if(tail(pos.lon.low.res,1) > n.lon.used){ + ss <- which(pos.lon.low.res > n.lon.used) + pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used) + } + + pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never has to be corrected because we excluded the poles + + MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res] + psl[,p] <- apply(MSLP.low.res, 1, mean) + } + + # compute the geostrophic indices (don't round them to the 3rd decimal as done by Trigo and Martin-Vide, not to introduce an error that changes 3-4% of WTs!!!): + SF<-WF<-F<-D<-D2<-c() + + WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5]) + SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12])) + F <- (SF*SF + WF*WF)^0.5 + + if(latc < 0){ + WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense! + SF <- -SF + } + + # D is the flow direction; since the angle returned by atan is in radians, it must be converted to degrees by multiplying it by (180/pi) + # D is the angle between the wind direction and its projection in the north-south direction (SF) + D <- (180/pi) * atan(WF/SF) + + # transform the initial angle D into an always-positive angle D2, measured clockwise from North: + Q1 <- which(WF>0 & SF>0) + Q2 <- which(WF>0 & SF<=0) + Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to 3 and which to 4. + Q4 <- which(WF<0 & SF>0) # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left. 
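+
+      # Aside: the longitude-index wrapping in the psl averaging loop above can
+      # also be written with modular arithmetic (a sketch; wrap.index is a name
+      # invented here). It selects the same set of columns, so the daily mean is
+      # unchanged:
+      if(FALSE){
+        wrap.index <- function(idx, n) ((idx - 1) %% n) + 1   # map any integer index to 1..n
+        # e.g.: wrap.index((pos.lon[p]-radius):(pos.lon[p]+radius), n.lon.used)
+      }
+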
+ + # the wind direction is usually different from the angle D where the wind is blowing to, so a correction has to be made: + D2 <- D + D2[Q1] <- D[Q1] + 180 + D2[Q2] <- D[Q2] + 360 + D2[Q4] <- D[Q4] + 180 + + # identify the directional WTs: + NE <- which(D2>=22.5 & D2<67.5) + E <- which(D2>=67.5 & D2<112.5) + SE <- which(D2>=112.5 & D2<157.5) + S <- which(D2>=157.5 & D2<202.5) + SW <- which(D2>=202.5 & D2<247.5) + W <- which(D2>=247.5 & D2<292.5) + NW <- which(D2>=292.5 & D2<337.5) + N <- which(D2>=337.5 | D2<22.5) + + # add a column on the right with the directional WT associated with the angle: + D2.dir<-rep(NA,length(D2)) + D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N" + + # compute the vorticity components: + ZS<-ZW<-Z<-c() + + ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11])) + ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2])) + Z <- ZS + ZW + + # Pure and hybrid WTs + cyc.pure <- which(Z>2*F) + anticyc.pure <- which(Z<(-2*F)) + hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F) + hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F) + indeter <- which(F<6 & abs(Z)<6) # indeterminate weather type (U); the choice of 6 depends on grid size and should be changed if the grid res. is higher! + + # create a data frame with all the info about the WT classification: + + #years.period <- months.period <- days.period <- c() + #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y))) + #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y)) + #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y)) + years.period <- rep(y, n.days) + months.period <- seq.months.in.a.year(y) + days.period <- seq.days.in.a.year(y) + WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE) + + WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir) + + # column identifying which days are of hybrid type and which of directional type: + WT$Hyb <- rep("Directional",length(D2)) + WT$Hyb[cyc.pure] <- "Pure_C" + WT$Hyb[anticyc.pure] <- "Pure_A" + WT$Hyb[hybrid.cyc] <- "Hybrid_C" + WT$Hyb[hybrid.anticyc] <- "Hybrid_A" + + # column with the list of the 26 official WTs: + WT$WT <- WT$WT.dir + WT$WT[cyc.pure] <- "C" + WT$WT[anticyc.pure] <- "A" + WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="") + WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="") + #WT$WT[indeter] <- "U" # comment this line not to include the Unclassified U WT + + WT$WT.num <- match(WT$WT,WTs.type)# add a column converting the 26 weather types to numbers from 1 to 26 + + # column with the list of the 10 WTs obtained by joining the eight pure directional WTs to C and A; + # each hybrid WT is merged into its corresponding directional type (e.g. C.SW is merged into SW, etc.) 
+ WT$WT10 <- WT$WT.dir + WT$WT10[cyc.pure] <- "C" + WT$WT10[anticyc.pure] <- "A" + WT$WT10.num <- match(WT$WT10,WTs.type) # convert the 10 weather types to numbers from 1 to 9, plus type A which maps to number 18 + + # add the other geostrophic indices: + WT$SF=round(SF,5) + WT$WF=round(WF,5) + WT$F=round(F,5) + WT$ZS=round(ZS,5) + WT$ZW=round(ZW,5) + WT$Z=round(Z,5) + + #WTs<-rbind(WTs,WT) + + # save the full data table as a .txt to exchange it with other people: + write.table(WT,file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + # save only the classification with 10 WTs, to have a smaller (binary) file: + #WTs<-cbind(WT$Year, WT$Month, WT$Day, WT$WT10.num) + WTs <- WT$WT10.num + save(WTs, file=paste0(workdir,"/",y,"_Rdata/10WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".RData"), compress=FALSE) + + rm(WT,SF,WF,F,D,ZS,ZW,Z) + + } # close for on lonc + } # close for on latc + cat("\n") + +} # close for on y diff --git a/old/WT_v3.R b/old/WT_v3.R new file mode 100644 index 0000000000000000000000000000000000000000..9b1ff971cefbdccd971b9dcc3c76c3387ad39295 --- /dev/null +++ b/old/WT_v3.R @@ -0,0 +1,289 @@ +######################################################################################### +# Lamb's WTs with ERA-Interim # +######################################################################################### +# run it from the bash with: +# +# Rscript WT_v3.R 1984 +# +# where 1984 is the year you want to classify the WTs; +# in this way you can run up to 8 jobs at the same time, each one producing its output files! +# You can also run it for a sequence of years with the syntax: +# +# Rscript WT_v3.R 1980 2014 +# +# and it will compute each year from 1980 to 2014, each one after finishing the previous one. +# If you want to run many years in parallel with just 1 command, run from the bash: +# +# for y in {1980..2014}; do Rscript WT_v3.R $y & done +# +# but it'd need 24 processors! In practice, better to run only 4-8 years at a time + + +library(s2dverification) # for the function Load() +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path + +workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files +subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray) + +mslp.rean='ERAintDailyHighRes' #'ERAintDailyLowRes' # daily reanalysis dataset used for MSLP data + +year.start=1980 # starting year of the MSLP daily data (from the 1st of January) +year.end=1980 # ending year of the MSLP daily data (up to the 31st of December) + +lamb.res=4.9 # spacing between the Lamb grid points in the meridional direction (in the zonal direction, it is exactly the double of this value) + # it should be put equal to the multiple of 'psl.res' closest to 5 degrees; i.e, equal to: psl.res * round((5/psl.res)) +low.res.size=5 # odd numb. of MSLP nearby grid points to compute the psl at each of the 16 points (both for lon and lat: actual n. 
of pts used is its square) + +########################################################################################## + +args <- commandArgs(TRUE) + +if(length(args) == 1) year.start <- year.end <- as.integer(args[1]) +if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])} + +WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point + +# Load 1 day of MSLP data from the reanalysis chosen, just to detect the number of latitude and longitude points: +MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) + +n.lat <- length(MSLP$lat) # number of latitude values +n.lon <- length(MSLP$lon) # number of longitude values + +lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid have no negative values, FALSE if not. + +# for each grid point of the reanalysis, compute the Lamb WT classification with Lamb grid centered on that point. +# we exclude only central points > +77 degrees lat and < -77 deg. because the Lamb grid needs 12.5 deg. north/south of the central point. +n.lat.unused.poles <- 20 # number of unused latitude values near each of the two poles (must exclude last 12.5 degrees N/S, so it depends on the spatial res. of the reanalysis) + +lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)] # latitude values used as central points +psl.res <- diff(lat.used)[1] # psl grid resolution +lat.used <- round(lat.used,3) # round psl values to the third decimal to save them in a file with a short file name (precision: ~100 m) +n.lat.used <- length(lat.used) + +lon.used <- round(MSLP$lon,3) # longitude values used as central points rounded to the third decimal (precision: ~100 m) +n.lon.used <- length(lon.used) + +n.grid.points <- length(lat.used)*length(lon.used) + +# load all MSLP data one year at a time: +for(y in year.start:year.end){ + print(paste0("Year: ",y)) + n.days <- n.days.in.a.year(y) + MSLP.year <- Load('psl', NULL, mslp.rean, paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', configfile = Load.path) + gc() + + # create two subdirs where to put the yearly output (there are too many files to put them in only 1 dir) + if(!dir.exists(file.path(workdir,paste0(y,"_Rdata")))) dir.create(file.path(workdir,paste0(y,"_Rdata"))) + if(!dir.exists(file.path(workdir,paste0(y,"_txt")))) dir.create(file.path(workdir,paste0(y,"_txt"))) + + # loop over the lat/lon of the central point of the Lamb classification: + for(latc in lat.used){ + #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug + + # first, compute the latitude coefficients: + SF1<-1/cos(latc*pi/180) + ZS1<-1/(2*cos(latc*pi/180)^2) + ZW1<-sin(latc*pi/180)/sin((latc-5)*pi/180) + ZW2<-sin(latc*pi/180)/sin((latc+5)*pi/180) + + for(lonc in lon.used){ + pos.latc <- which(lat.used==latc) + pos.lonc <- which(lon.used==lonc) + cat(paste0("Grid point saved: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r") + + # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point: + lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid + lat[1] <- lat[2] <- latc + 10 + lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + 5 + 
lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc + lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - 5 + lat[15] <- lat[16] <- latc - 10 + + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 + + # longitude correction for reanalysis with positive-only longitude (to stay always positive): + if(lon.pos){ + if(lonc - 5 < 0){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc - 15 < 0 && lonc - 5 >= 0){ + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc + 5 >= 360){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + if(lonc + 15 >= 360 && lonc + 5 < 360){ + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + } + + # longitude correction for reanalysis with [-180, +180] longitude (to stay always in the same interval): + # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!! + if(!lon.pos){ + if(lonc - 5 < -180){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - 5 + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 15 + 360 + } + if(lonc + 5 >= 180){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + 5 - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 15 - 360 + } + } + + # lat and lon position of the 16 Lamb grid points inside the MSLP reanalysis: + pos.lat <- pos.lon <- c() + for(p in 1:16){ + pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon) + pos.lat[p] <- pos[1] + pos.lon[p] <- pos[2] + } + + #WTs <- data.frame() + + # mean psl at each of the 16 grid points: + psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid + #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]] + + for(p in 1:16) { + + pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius) + + if(head(pos.lon.low.res,1) <= 0){ + ss <- which(pos.lon.low.res <= 0) + pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss]) + } + if(tail(pos.lon.low.res,1) > n.lon.used){ + ss <- which(pos.lon.low.res > n.lon.used) + pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used) + } + + pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never has to be corrected because we excluded the poles + + MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res] + psl[,p] <- apply(MSLP.low.res, 1, mean) + } + + # compute the geostrophic indices (don't round them to the 3rd decimal as done by Trigo and Martin-Vide, not to introduce an error that changes 3-4% of WTs!!!): + SF<-WF<-F<-D<-D2<-c() + + WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5]) + SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12])) + F <- (SF*SF + WF*WF)^0.5 + + if(latc < 0){ + WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense! 
+ SF <- -SF + } + + # D is the flow direction; since the angle returned by atan is in radians, it must be converted to degrees by multiplying it by (180/pi) + # D is the angle between the wind direction and its projection in the north-south direction (SF) + D <- (180/pi) * atan(WF/SF) + + # transform the initial angle D into an always-positive angle D2, measured clockwise from North: + Q1 <- which(WF>0 & SF>0) + Q2 <- which(WF>0 & SF<=0) + Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to 3 and which to 4. + Q4 <- which(WF<0 & SF>0) # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left. + + # the wind direction is usually different from the angle D where the wind is blowing to, so a correction has to be made: + D2 <- D + D2[Q1] <- D[Q1] + 180 + D2[Q2] <- D[Q2] + 360 + D2[Q4] <- D[Q4] + 180 + + # identify the directional WTs: + NE <- which(D2>=22.5 & D2<67.5) + E <- which(D2>=67.5 & D2<112.5) + SE <- which(D2>=112.5 & D2<157.5) + S <- which(D2>=157.5 & D2<202.5) + SW <- which(D2>=202.5 & D2<247.5) + W <- which(D2>=247.5 & D2<292.5) + NW <- which(D2>=292.5 & D2<337.5) + N <- which(D2>=337.5 | D2<22.5) + + # add a column on the right with the directional WT associated with the angle: + D2.dir<-rep(NA,length(D2)) + D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N" + + # compute the vorticity components: + ZS<-ZW<-Z<-c() + + ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11])) + ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2])) + Z <- ZS + ZW + + # Pure and hybrid WTs + cyc.pure <- which(Z>2*F) + anticyc.pure <- which(Z<(-2*F)) + hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F) + hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F) + indeter <- which(F<6 & abs(Z)<6) # indeterminate weather type (U); the choice of 6 depends on grid size and should be changed if the grid res. is higher! 
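+
+      # Aside: the thresholds above follow Jenkinson-Collison-style rules:
+      # |Z| > 2F -> pure C/A, F < |Z| < 2F -> hybrid, otherwise directional,
+      # with weak flow (F<6 and |Z|<6) left unclassified (U). The same decision
+      # for a single day, as a scalar sketch (classify.day is a name invented here):
+      if(FALSE){
+        classify.day <- function(z, f){
+          if(f < 6 && abs(z) < 6) return("U")                          # indeterminate type
+          if(abs(z) > 2*f) return(if(z > 0) "pure C" else "pure A")    # pure cyclonic/anticyclonic
+          if(abs(z) > f)   return(if(z > 0) "hybrid C" else "hybrid A")
+          "directional"                                                # |z| <= f
+        }
+      }
+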
+
+      # create a data frame with all the info about the WT classification:
+
+      #years.period <- months.period <- days.period <- c()
+      #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+      #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+      #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+      years.period <- rep(y, n.days)
+      months.period <- seq.months.in.a.year(y)
+      days.period <- seq.days.in.a.year(y)
+      WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+      WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir)
+
+      # column identifying which days are of hybrid type and which of directional type:
+      WT$Hyb <- rep("Directional",length(D2))
+      WT$Hyb[cyc.pure] <- "Pure_C"
+      WT$Hyb[anticyc.pure] <- "Pure_A"
+      WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+      WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+
+      # column with the list of the 26 official WTs:
+      WT$WT <- WT$WT.dir
+      WT$WT[cyc.pure] <- "C"
+      WT$WT[anticyc.pure] <- "A"
+      WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+      WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+      #WT$WT[indeter] <- "U" # comment this line not to include the Unclassified U WT
+
+      WT$WT.num <- match(WT$WT,WTs.type) # add a column converting the 26 weather types to numbers from 1 to 26
+
+      # column with the list of the 10 WTs obtained by merging the eight pure directional WTs with C and A;
+      # each hybrid WT is added to its corresponding directional type (e.g., C.SW is added to SW, etc.):
+      WT$WT10 <- WT$WT.dir
+      WT$WT10[cyc.pure] <- "C"
+      WT$WT10[anticyc.pure] <- "A"
+      WT$WT10.num <- match(WT$WT10,WTs.type) # conversion of the 10 weather types to numbers from 1 to 9, plus type A, which converts to number 18
+
+      # add the other geostrophic indices:
+      WT$SF=round(SF,5)
+      WT$WF=round(WF,5)
+      WT$F=round(F,5)
+      WT$ZS=round(ZS,5)
+      WT$ZW=round(ZW,5)
+      WT$Z=round(Z,5)
+
+      #WTs<-rbind(WTs,WT)
+
+      # save the full data table as a .txt to exchange it with other people:
+      write.table(WT,file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+      # save only the classification with 10 WTs, to have a smaller (binary) file:
+      #WTs<-cbind(WT$Year, WT$Month, WT$Day, WT$WT10.num)
+      WTs <- WT$WT10.num
+      save(WTs, file=paste0(workdir,"/",y,"_Rdata/10WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".RData"), compress=FALSE)
+
+      rm(WT,SF,WF,F,D,ZS,ZW,Z)
+
+    } # close for on lonc
+  } # close for on latc
+  cat("\n")
+
+} # close for on y
diff --git a/old/WT_v4.R b/old/WT_v4.R
new file mode 100644
index 0000000000000000000000000000000000000000..85be37da1b7ec020712b299abcc727006e52d1a9
--- /dev/null
+++ b/old/WT_v4.R
@@ -0,0 +1,326 @@
+#########################################################################################
+#                           Lamb's WTs with ERA-Interim                                 #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript WT_v4.R 1984
+#
+# where 1984 is the year for which you want to classify the WTs;
+# in this way you can run up to 8 jobs at the same time, each one producing its own output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript WT_v4.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, each one after finishing the previous one.
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript WT_v4.R $y & done
+#
+# but it'd need 35 processors! In practice, it is possible to run only 4-8 years at a time
+
+
+library(s2dverification) # for the function Load()
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files
+subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean='ERAintDailyHighRes' #'ERAintDailyLowRes' # daily reanalysis dataset used for MSLP data
+
+year.start=1980 # starting year of the MSLP daily data (from the 1st of January)
+year.end=2015 # ending year of the MSLP daily data (up to the 31st of December)
+
+res=5.25 #4.9 # spacing between the Lamb grid points in the meridional direction (in the zonal direction, it is exactly double this value);
+              # it should be set to the multiple of 'psl.res' closest to 5 degrees, i.e.: res = psl.res * round(5/psl.res)
+
+low.res.size=7 # odd number of nearby MSLP grid points used to compute the psl at each of the 16 points (both for lon and lat: the actual number of points used is its square)
+
+partial.end=FALSE # set TRUE if the last year ('year.end') does not have all the yearly data but stops before December 31st; in this case, the variable below must also be specified
+n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE). E.g., data for 2015 doesn't have December, so it has 365-31=334 days
+
+merge=FALSE # set TRUE to concatenate all the WT classifications of different years at the same grid point at the end of the analysis, FALSE otherwise
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+
+n.lat <- length(MSLP$lat) # number of latitude values
+n.lon <- length(MSLP$lon) # number of longitude values
+
+lon.pos <- all(MSLP$lon >= 0) # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+# for each grid point of the reanalysis, compute the Lamb WT classification with the Lamb grid centered on that point.
+# we exclude only central points > +77 degrees lat and < -77 deg. because the Lamb grid needs 12.5 deg. north/south of the central point.
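+# Quick sanity check of the polar exclusion below (hedged: 0.7 deg is an assumed grid spacing
+# for the high-resolution ERA-Interim data; substitute the actual psl.res of the reanalysis):
+# 20 * 0.7                    # ~14 degrees of latitude excluded at each pole
+# 2*5.25 + ((7-1)/2) * 0.7    # ~12.6 degrees needed by the Lamb grid plus the averaging radius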
+n.lat.unused.poles <- 20 # number of unused latitude values near each of the two poles (the last ~10 degrees N/S must be excluded, so it depends on the spatial res. of the reanalysis)
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)] # latitude values used as central points
+psl.res <- diff(lat.used)[1] # psl grid resolution
+lat.used <- round(lat.used,3) # round latitude values to the third decimal to save them in a file with a short file name (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3) # longitude values used as central points, rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# load all MSLP data one year at a time:
+for(y in year.start:year.end){
+  print(paste0("Year: ",y))
+  n.days <- n.days.in.a.year(y)
+  if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days
+
+  MSLP.year <- Load('psl', NULL, mslp.rean, paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', configfile = Load.path)
+  gc()
+
+  # loop over the lat/lon of the central point of the Lamb classification:
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+    ## first, compute the latitude coefficients:
+    SF1<-1/cos(latc*pi/180)
+    ZS1<-1/(2*cos(latc*pi/180)^2)
+    ZW1<-sin(latc*pi/180)/sin((latc-res)*pi/180)
+    ZW2<-sin(latc*pi/180)/sin((latc+res)*pi/180)
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+      cat(paste0("Grid point saved: ",(pos.latc-1)*n.lon.used+pos.lonc,"/", n.grid.points, " "), "\r")
+
+      # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point:
+      lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid
+      lat[1] <- lat[2] <- latc + 2*res
+      lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res
+      lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc
+      lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res
+      lat[15] <- lat[16] <- latc - 2*res
+
+      lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res
+      lon[3] <- lon[7] <- lon[11] <- lonc - 3*res
+      lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res
+      lon[6] <- lon[10] <- lon[14] <- lonc + 3*res
+
+      # longitude correction for reanalyses with positive-only longitude (to stay always positive):
+      if(lon.pos){
+        if(lonc - res < 0){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc - 3*res < 0 && lonc - res >= 0){
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 360){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+        if(lonc + 3*res >= 360 && lonc + res < 360){
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
+      # longitude correction for reanalyses with [-180, +180] longitude (to stay always in the same interval):
+      # MUST STILL UPDATE FOR THE CASES WHERE ONLY lonc - 3*res OR lonc + 3*res CROSS -180/+180 !!!
+      if(!lon.pos){
+        if(lonc - res < -180){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 180){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
+      # lat and lon position of the 16 Lamb grid points inside the MSLP reanalysis:
+      pos.lat <- pos.lon <- c()
+      for(p in 1:16){
+        pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon)
+        pos.lat[p] <- pos[1]
+        pos.lon[p] <- pos[2]
+      }
+
+      #WTs <- data.frame()
+
+      # mean psl at each of the 16 grid points:
+      psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid
+      #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]]
+
+      for(p in 1:16) {
+
+        pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius)
+
+        if(head(pos.lon.low.res,1) <= 0){
+          ss <- which(pos.lon.low.res <= 0)
+          pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+        }
+        if(tail(pos.lon.low.res,1) > n.lon.used){
+          ss <- which(pos.lon.low.res > n.lon.used)
+          pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+        }
+
+        pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never has to be corrected because we excluded the poles
+
+        MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]
+        psl[,p] <- apply(MSLP.low.res, 1, mean)
+      }
+
+      # compute the geostrophic indices (don't round them to the 3rd decimal as done by Trigo and Martin-Vide, to avoid introducing an error that changes 3-4% of the WTs!!!):
+      SF<-WF<-F<-D<-D2<-c()
+
+      WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5])
+      SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]))
+      F <- (SF*SF + WF*WF)^0.5
+
+      if(latc < 0){
+        WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense!
+        SF <- -SF
+      }
+
+      # D is the flow direction; since the angle returned by atan is in radians, it must be converted to degrees by multiplying it by (180/pi).
+      # D is the angle between the wind direction and its projection onto the north-south direction (SF).
+      D <- (180/pi) * atan(WF/SF)
+
+      # transform the initial angle D into an always-positive angle D2, measured clockwise from North:
+      Q1 <- which(WF>0 & SF>0)
+      Q2 <- which(WF>0 & SF<=0)
+      Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to 3 and which to 4.
+      Q4 <- which(WF<0 & SF>0)  # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left.
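+      # A worked check of the quadrant logic (illustrative numbers, not from the data): with
+      # SF = 5 and WF = -5, atan(WF/SF) gives D = -45 deg, and WF<0 & SF>0 selects Q4, so the
+      # correction below yields D2 = -45 + 180 = 135 deg, i.e. a south-easterly (SE) flow.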
+
+      # the wind direction is usually different from the angle D the wind is blowing to, so a correction has to be made:
+      D2 <- D
+      D2[Q1] <- D[Q1] + 180
+      D2[Q2] <- D[Q2] + 360
+      D2[Q4] <- D[Q4] + 180
+
+      # identify the directional WTs:
+      NE <- which(D2>=22.5 & D2<67.5)
+      E <- which(D2>=67.5 & D2<112.5)
+      SE <- which(D2>=112.5 & D2<157.5)
+      S <- which(D2>=157.5 & D2<202.5)
+      SW <- which(D2>=202.5 & D2<247.5)
+      W <- which(D2>=247.5 & D2<292.5)
+      NW <- which(D2>=292.5 & D2<337.5)
+      N <- which(D2>=337.5 | D2<22.5)
+
+      # add a column on the right with the directional WT associated with each angle:
+      D2.dir<-rep(NA,length(D2))
+      D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N"
+
+      # compute the vorticity components:
+      ZS<-ZW<-Z<-c()
+
+      ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11]))
+      ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2]))
+      Z <- ZS + ZW
+
+      # Pure and hybrid WTs
+      cyc.pure <- which(Z>2*F)
+      anticyc.pure <- which(Z<(-2*F))
+      hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F)
+      hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F)
+      indeter <- which(F<6 & abs(Z)<6) # indeterminate weather type (U); the choice of 6 depends on grid size and should be changed if the grid resolution is higher!
+
+      # create a data frame with all the info about the WT classification:
+
+      #years.period <- months.period <- days.period <- c()
+      #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+      #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+      #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+      years.period <- rep(y, n.days)
+      months.period <- seq.months.in.a.year(y)[1:n.days]
+      days.period <- seq.days.in.a.year(y)[1:n.days]
+      WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+      WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir)
+
+      # column identifying which days are of hybrid type and which of directional type:
+      WT$Hyb <- rep("Directional",length(D2))
+      WT$Hyb[cyc.pure] <- "Pure_C"
+      WT$Hyb[anticyc.pure] <- "Pure_A"
+      WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+      WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+
+      # column with the list of the 26 official WTs:
+      WT$WT <- WT$WT.dir
+      WT$WT[cyc.pure] <- "C"
+      WT$WT[anticyc.pure] <- "A"
+      WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+      WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+      #WT$WT[indeter] <- "U" # comment this line not to include the Unclassified U WT
+
+      WT$WT.num <- match(WT$WT,WTs.type) # add a column converting the 26 weather types to numbers from 1 to 26
+
+      # column with the list of the 10 WTs obtained by merging the eight pure directional WTs with C and A;
+      # each hybrid WT is added to its corresponding directional type (e.g., C.SW is added to SW, etc.):
+      WT$WT10 <- WT$WT.dir
+      WT$WT10[cyc.pure] <- "C"
+      WT$WT10[anticyc.pure] <- "A"
+      WT$WT10.num <- match(WT$WT10,WTs.type) # conversion of the 10 weather types to numbers from 1 to 9, plus type A, which converts to number 18
+
+      # add the other geostrophic indices:
+      WT$SF=round(SF,5)
+      WT$WF=round(WF,5)
+      WT$F=round(F,5)
+      WT$ZS=round(ZS,5)
+      WT$ZW=round(ZW,5)
+      WT$Z=round(Z,5)
+
+      #WTs<-rbind(WTs,WT)
+
+      # create two subdirs where to put the yearly output (there are too many files to put them all in 1 dir):
+      if(!dir.exists(file.path(workdir,paste0(y,"_Rdata")))) dir.create(file.path(workdir,paste0(y,"_Rdata")))
+      if(!dir.exists(file.path(workdir,paste0(y,"_txt")))) dir.create(file.path(workdir,paste0(y,"_txt")))
+
+      # save the full data table as a .txt to exchange it with other people:
+      write.table(WT,file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+      # save only the classification with 10 WTs, to have a smaller (binary) file:
+      #WTs<-cbind(WT$Year, WT$Month, WT$Day, WT$WT10.num)
+      WTs <- WT$WT10.num
+      save(WTs, file=paste0(workdir,"/",y,"_Rdata/10WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".RData"), compress=FALSE)
+
+      rm(WT,SF,WF,F,D,ZS,ZW,Z)
+
+    } # close for on lonc
+  } # close for on latc
+  cat("\n")
+
+} # close for on y
+
+
+# merge all the txt data of the same lat and lon into 1 file to have all the years together:
+if(merge){
+  if(!dir.exists(file.path(workdir,"wt_txt"))) dir.create(file.path(workdir,"wt_txt")) # output dir of the merged files
+
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+      cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used+pos.lonc,"/", n.grid.points, " "), "\r")
+
+      for(y in year.start:year.end){
+        #print(paste0("Year: ",y))
+
+        WTs <- read.table(file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),stringsAsFactors=FALSE, header=TRUE) # Load WTs
+        if(y==year.start){
+          WTs_full_period <- WTs
+        } else {
+          WTs_full_period <- rbind(WTs_full_period, WTs)
+        }
+      }
+
+      write.table(WTs_full_period, file=paste0(workdir,"/wt_txt/WTs_",mslp.rean,"_",year.start,"-",year.end,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+      rm(WTs_full_period)
+    }
+  }
+
+} # close if on merge
diff --git a/old/WT_v5.R b/old/WT_v5.R
new file mode 100644
index 0000000000000000000000000000000000000000..36eecb435bb97fbf58904c8ac33ad573efe33536
--- /dev/null
+++ b/old/WT_v5.R
@@ -0,0 +1,438 @@
+#########################################################################################
+#                           Lamb's WTs with ERA-Interim                                 #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript WT_v5.R 1984
+#
+# where 1984 is the year for which you want to classify the WTs;
+# in this way you can run up to 8 jobs at the same time, each one producing its own output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript WT_v5.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, each one after finishing the previous one.
+#
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript WT_v5.R $y & done
+#
+# but it'd need 35 processors! In practice, it is possible to run only 4-8 years at a time.
+#
+# Rscript WT_v5.R 1980 1984
+# Rscript WT_v5.R 1985 1989
+# Rscript WT_v5.R 1990 1994
+# Rscript WT_v5.R 1995 1999
+# Rscript WT_v5.R 2000 2004
+# Rscript WT_v5.R 2005 2009
+# Rscript WT_v5.R 2010 2014
+#
+# Rscript WT_v5.R 1981 1984
+# Rscript WT_v5.R 1986 1989
+# Rscript WT_v5.R 1991 1994
+# Rscript WT_v5.R 1996 1999
+#
+# Rscript WT_v5.R 2001 2004
+# Rscript WT_v5.R 2006 2009
+# Rscript WT_v5.R 2011 2014
+
+library(s2dverification) # for the function Load()
+library(ff)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path
+# Available reanalyses:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files
+subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean=ERAint #'ERAintDailyHighRes' #'ERAintDailyLowRes' # choose one of the two daily reanalyses above for loading the MSLP data
+mslp.rean.name <- 'ERAint'
+
+year.start=1980 # starting year of the MSLP daily data (from the 1st of January)
+year.end=2015 # ending year of the MSLP daily data (up to the 31st of December)
+
+res=5.25 #4.9 # spacing between the Lamb grid points in the meridional direction (in the zonal direction, it is exactly double this value);
+              # it should be set to the multiple of 'psl.res' closest to 5 degrees, i.e.: res = psl.res * round(5/psl.res)
+
+low.res.size=7 # odd number of nearby MSLP grid points used to compute the psl at each of the 16 points (both for lon and lat: the actual number of points used is its square)
+
+partial.end=FALSE # set TRUE if the last year ('year.end') does not have all the yearly data but stops before December 31st; in this case, the variable below must also be specified
+n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE). E.g., data for 2015 doesn't have December, so it has 365-31=334 days
+
+merge=FALSE # set TRUE to concatenate all the WT classifications of different years at the same grid point at the end of the analysis, FALSE otherwise
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+#MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+MSLP <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lat <- length(MSLP$lat) # number of latitude values
+n.lon <- length(MSLP$lon) # number of longitude values
+
+lon.pos <- all(MSLP$lon >= 0) # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+# for each grid point of the reanalysis, compute the Lamb WT classification with the Lamb grid centered on that point.
+# we exclude only central points > +77 degrees lat and < -77 deg. because the Lamb grid needs 12.5 deg. north/south of the central point.
+n.lat.unused.poles <- 20 # number of unused latitude values near each of the two poles (the last ~10 degrees N/S must be excluded, so it depends on the spatial res. of the reanalysis)
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)] # latitude values used as central points
+psl.res <- diff(lat.used)[1] # psl grid resolution
+lat.used <- round(lat.used,3) # round latitude values to the third decimal to save them in a file with a short file name (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3) # longitude values used as central points, rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# load all MSLP data one year at a time:
+for(y in year.start:year.end){
+  print(paste0("Year: ",y))
+  n.days <- n.days.in.a.year(y)
+  if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days
+
+  # create two subdirs where to put the yearly ascii and .ff output:
+  if(!dir.exists(file.path(workdir,paste0(y,"_txt")))) dir.create(file.path(workdir,paste0(y,"_txt")))
+  if(!dir.exists(file.path(workdir,paste0(y,"_RData")))) dir.create(file.path(workdir,paste0(y,"_RData")))
+
+  #test <- 0 # dummy variable to create an ff file
+  system(paste0("rm -f ",workdir,"/WTs1_",y)) # remove any stale temporary ff file left by a previous run
+  test <- as.ff(0, vmode="double", file=paste0(workdir,"/WTs1_",y)) # create the temporary ff file WTs1_<year>
+  #test2 <- as.ff(0, vmode="double", file=paste0(workdir,"/WTs2_",y))
+  #test3 <- as.ff(0, vmode="double", file=paste0(workdir,"/WTs3_",y))
+
+
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs1_",y), compress=FALSE)  # create WTs1_.ff and WTs1_.RData # there is a limit of 47000 objects that can be saved
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs2_",y), compress=FALSE)  # create WTs2_.ff and WTs2_.RData # with ffsave. Since we have ~100000 objects to save,
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs3_",y), compress=FALSE)  # create WTs3_.ff and WTs3_.RData # we must create different output files for each year.
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs4_",y), compress=FALSE)  # create WTs4_.ff and WTs4_.RData # Furthermore, saving speed decreases with the size of
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs5_",y), compress=FALSE)  # create WTs5_.ff and WTs5_.RData # the .ff file. Thus, it's better not to save more than
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs6_",y), compress=FALSE)  # create WTs6_.ff and WTs6_.RData # ~10000 objects in one file. For this reason, we
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs7_",y), compress=FALSE)  # create WTs7_.ff and WTs7_.RData # are creating 10 different save files.
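+  # (the remaining three chunks are created just below; hedged usage note: any chunk saved
+  # this way can later be restored with ff::ffload(), which reloads every object stored in
+  # it, e.g., with the object-naming scheme used further below:
+  #   ffload(paste0(workdir,"/1980_RData/WTs1_1980"))
+  #   wt <- get("WTs_lat_70_lon_0")   # the daily 10-WT series of one grid point
+  # )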
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs8_",y), compress=FALSE)  # create WTs8_.ff and WTs8_.RData
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs9_",y), compress=FALSE)  # create WTs9_.ff and WTs9_.RData
+  ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs10_",y), compress=FALSE) # create WTs10_.ff and WTs10_.RData
+
+  close(test)
+  file.remove(paste0(workdir,"/WTs1_",y)) # delete the temporary ff file with the test variable
+
+  MSLP.year <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat')
+  gc()
+
+  # loop over the lat/lon of the central point of the Lamb classification:
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+    ## first, compute the latitude coefficients:
+    SF1<-1/cos(latc*pi/180)
+    ZS1<-1/(2*cos(latc*pi/180)^2)
+    ZW1<-sin(latc*pi/180)/sin((latc-res)*pi/180)
+    ZW2<-sin(latc*pi/180)/sin((latc+res)*pi/180)
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+
+      n.point <- (pos.latc-1)*n.lon.used + pos.lonc
+      cat(paste0("Grid point saved: ", n.point,"/", n.grid.points, " "), "\r")
+
+      # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point:
+      lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid
+      lat[1] <- lat[2] <- latc + 2*res
+      lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res
+      lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc
+      lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res
+      lat[15] <- lat[16] <- latc - 2*res
+
+      lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res
+      lon[3] <- lon[7] <- lon[11] <- lonc - 3*res
+      lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res
+      lon[6] <- lon[10] <- lon[14] <- lonc + 3*res
+
+      # longitude correction for reanalyses with positive-only longitude (to stay always positive):
+      if(lon.pos){
+        if(lonc - res < 0){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc - 3*res < 0 && lonc - res >= 0){
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 360){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+        if(lonc + 3*res >= 360 && lonc + res < 360){
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
+      # longitude correction for reanalyses with [-180, +180] longitude (to stay always in the same interval):
+      # MUST STILL UPDATE FOR THE CASES WHERE ONLY lonc - 3*res OR lonc + 3*res CROSS -180/+180 !!!
+      if(!lon.pos){
+        if(lonc - res < -180){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 180){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
+      # lat and lon position of the 16 Lamb grid points inside the MSLP reanalysis:
+      pos.lat <- pos.lon <- c()
+      for(p in 1:16){
+        pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon)
+        pos.lat[p] <- pos[1]
+        pos.lon[p] <- pos[2]
+      }
+
+      #WTs <- data.frame()
+
+      # mean psl at each of the 16 grid points:
+      psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid
+      #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]]
+
+      for(p in 1:16) {
+
+        pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius)
+
+        if(head(pos.lon.low.res,1) <= 0){
+          ss <- which(pos.lon.low.res <= 0)
+          pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+        }
+        if(tail(pos.lon.low.res,1) > n.lon.used){
+          ss <- which(pos.lon.low.res > n.lon.used)
+          pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+        }
+
+        pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never has to be corrected because we excluded the poles
+
+        MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]
+        psl[,p] <- apply(MSLP.low.res, 1, mean)
+      }
+
+      # compute the geostrophic indices (don't round them to the 3rd decimal as done by Trigo and Martin-Vide, to avoid introducing an error that changes 3-4% of the WTs!!!):
+      SF<-WF<-F<-D<-D2<-c()
+
+      WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5])
+      SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]))
+      F <- (SF*SF + WF*WF)^0.5
+
+      if(latc < 0){
+        WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense!
+        SF <- -SF
+      }
+
+      # D is the flow direction; since the angle returned by atan is in radians, it must be converted to degrees by multiplying it by (180/pi).
+      # D is the angle between the wind direction and its projection onto the north-south direction (SF).
+      D <- (180/pi) * atan(WF/SF)
+
+      # transform the initial angle D into an always-positive angle D2, measured clockwise from North:
+      Q1 <- which(WF>0 & SF>0)
+      Q2 <- which(WF>0 & SF<=0)
+      Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to 3 and which to 4.
+      Q4 <- which(WF<0 & SF>0)  # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left.
+
+      # the wind direction is usually different from the angle D the wind is blowing to, so a correction has to be made:
+      D2 <- D
+      D2[Q1] <- D[Q1] + 180
+      D2[Q2] <- D[Q2] + 360
+      D2[Q4] <- D[Q4] + 180
+
+      # identify the directional WTs:
+      NE <- which(D2>=22.5 & D2<67.5)
+      E <- which(D2>=67.5 & D2<112.5)
+      SE <- which(D2>=112.5 & D2<157.5)
+      S <- which(D2>=157.5 & D2<202.5)
+      SW <- which(D2>=202.5 & D2<247.5)
+      W <- which(D2>=247.5 & D2<292.5)
+      NW <- which(D2>=292.5 & D2<337.5)
+      N <- which(D2>=337.5 | D2<22.5)
+
+      # add a column on the right with the directional WT associated with each angle:
+      D2.dir<-rep(NA,length(D2))
+      D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N"
+
+      # compute the vorticity components:
+      ZS<-ZW<-Z<-c()
+
+      ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11]))
+      ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2]))
+      Z <- ZS + ZW
+
+      # Pure and hybrid WTs
+      cyc.pure <- which(Z>2*F)
+      anticyc.pure <- which(Z<(-2*F))
+      hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F)
+      hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F)
+      indeter <- which(F<6 & abs(Z)<6) # indeterminate weather type (U); the choice of 6 depends on grid size and should be changed if the grid resolution is higher!
+
+      # create a data frame with all the info about the WT classification:
+
+      #years.period <- months.period <- days.period <- c()
+      #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+      #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+      #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+      years.period <- rep(y, n.days)
+      months.period <- seq.months.in.a.year(y)[1:n.days]
+      days.period <- seq.days.in.a.year(y)[1:n.days]
+      WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+      WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir)
+
+      # column identifying which days are of hybrid type and which of directional type:
+      WT$Hyb <- rep("Directional",length(D2))
+      WT$Hyb[cyc.pure] <- "Pure_C"
+      WT$Hyb[anticyc.pure] <- "Pure_A"
+      WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+      WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+
+      # column with the list of the 26 official WTs:
+      WT$WT <- WT$WT.dir
+      WT$WT[cyc.pure] <- "C"
+      WT$WT[anticyc.pure] <- "A"
+      WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+      WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+      #WT$WT[indeter] <- "U" # comment this line not to include the Unclassified U WT
+
+      WT$WT.num <- match(WT$WT,WTs.type) # add a column converting the 26 weather types to numbers from 1 to 26
+
+      # column with the list of the 10 WTs obtained by merging the eight pure directional WTs with C and A;
+      # each hybrid WT is added to its corresponding directional type (e.g., C.SW is added to SW, etc.):
+      WT$WT10 <- WT$WT.dir
+      WT$WT10[cyc.pure] <- "C"
+      WT$WT10[anticyc.pure] <- "A"
+      WT$WT10.num <- match(WT$WT10,WTs.type) # conversion of the 10 weather types to numbers from 1 to 9, plus type A, which converts to number 18
+
+      # add the other geostrophic indices:
+      WT$SF=round(SF,5)
+      WT$WF=round(WF,5)
+      WT$F=round(F,5)
+      WT$ZS=round(ZS,5)
+      WT$ZW=round(ZW,5)
+      WT$Z=round(Z,5)
+
+      #WTs<-rbind(WTs,WT)
+
+      # save the full data table as a .txt to exchange it with other people:
+      #write.table(WT,file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+      output_txt <- paste0(workdir,"/",y,"_txt/WTs_",mslp.rean.name,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt")
+      write.table(WT,file=output_txt,row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+
+      # save only the classification with 10 WTs, to have a smaller (binary) file:
+      #WTs<-cbind(WT$Year, WT$Month, WT$Day, WT$WT10.num)
+      #WTs <- WT$WT10.num
+
+      #a <- 1:1000
+      #year <- 4000
+      #WTs <- as.ff(a, vmode="double", file=paste0(workdir,"/WTs_",year))
+      #rm(WTs)
+
+      #latc <- 70
+      #lonc <- 0
+      #assign(paste0("WTs_lat_",latc,"_lon_",lonc), WTs)
+
+      # you must use this syntax when saving an RData file whose variable name is given by a string:
+      #do.call(ffsave, list(paste0("WTs_lat_",latc,"_lon_", lonc), file=paste0(workdir,"/WTs_",year), compress=FALSE)) # creates WTs_4000.ff and WTs_4000.RData
+
+      #b <- 1001:2000
+      #WTs2 <- as.ff(b, vmode="double", file=paste0(workdir,"/WTs_",year), overwrite=TRUE)
+
+      #latc <- 75
+      #lonc <- 0
+
+      #WTs <- as.ff(WT$WT10.num, vmode="double", file=paste0(workdir,"/WTs_",y), overwrite=TRUE)
+      #assign(paste0("WTs_lat_",latc,"_lon_",lonc), WTs)
+
+      if(latc < 0) { latc.char <- paste0("n", abs(latc)) } else {latc.char <- latc}
+      if(lonc < 0) { lonc.char <- paste0("n", abs(lonc)) } else {lonc.char <- lonc}
+
+      # merge the two commands above into a single one, to avoid introducing the variable WTs
+      # (the do.call syntax below must be used when saving an RData file whose variable name is given by a string):
+      assign(paste0("WTs_lat_",latc.char,"_lon_",lonc.char), as.ff(WT$WT10.num, vmode="double", file=paste0(workdir,"/",y,"_RData/WTs_",y), overwrite=TRUE))
+
+      if(n.point < 10000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs1_",y), compress=FALSE, add=TRUE))
+      if(n.point >= 10000 & n.point < 20000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs2_",y), compress=FALSE, add=TRUE))
+      if(n.point >= 20000 & n.point < 30000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs3_",y), compress=FALSE, add=TRUE))
+      if(n.point >= 30000 & n.point < 40000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs4_",y), compress=FALSE, add=TRUE))
+      if(n.point >= 40000 & n.point < 50000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs5_",y), compress=FALSE, add=TRUE))
+      if(n.point >= 50000 & n.point < 60000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs6_",y), compress=FALSE, add=TRUE))
+      if(n.point >= 60000 & n.point < 70000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs7_",y), compress=FALSE, add=TRUE))
file=paste0(workdir,"/",y,"_RData/WTs7_",y), compress=FALSE, add=TRUE)) + if(n.point >= 70000 & n.point < 80000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs8_",y), compress=FALSE, add=TRUE)) + if(n.point >= 80000 & n.point < 90000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs9_",y), compress=FALSE, add=TRUE)) + if(n.point >= 90000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs10_",y), compress=FALSE, add=TRUE)) + + #ffsave(WTs, file=paste0(workdir,"/WTs_",year), add=TRUE) + #save(WTs, file=paste0(workdir,"/",y,"_Rdata/10WTs_",mslp.rean.name,"_year_",y,"_lat_",latc,"_lon_",lonc,".RData"), compress=FALSE) + + rm(WT,SF,WF,F,D,ZS,ZW,W, output_txt) + do.call(rm, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char))) + gc() + + } # close for on lonc + } # close for on latc + cat("\n") + +} # close for on y + + +#delete(paste0("WTs_lat_",latc,"_lon_",lonc)) +#do.call(rm, list(paste0("WTs_lat_",latc,"_lon_",lonc))) +#gc() + + + + +# merge all txt data of the same lat and lon in 1 file to have all the years together and delete the yearly files: +if(merge){ + if(!dir.exists(file.path(workdir,paste0("wt_txt")))) dir.create(file.path(workdir,paste0("wt_txt"))) + + for(latc in lat.used){ + #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug + + for(lonc in lon.used){ + pos.latc <- which(lat.used==latc) + pos.lonc <- which(lon.used==lonc) + cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r") + + for(y in year.start:year.end){ + #print(paste0("Year: ",y)) + + WTs <- read.table(file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean.name,"_year_",y,"_lat_",latc.char,"_lon_",lonc.char,".txt"),stringsAsFactors=FALSE, header=TRUE) # Load WTs + if(y==year.start){ + WTs_full_period <- WTs + } else { + WTs_full_period <- rbind(WTs_full_period, WTs) + } + + } + + write.table(WTs_full_period, file=paste0(workdir,"/wt_txt/WTs_",mslp.rean.name,"_",year.start,"-",year.end,"_lat_",latc.char,"_lon_",lonc.char,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + rm(WTs_full_period) + + } + } + + # delete the old yearly files in the _txt directories and the directories: + for(y in year.start:year.end){ + system(paste0("rm -fr ",workdir,"/",y,"_txt/")) + } + +} # close if on merge + + diff --git a/old/WT_v5.R~ b/old/WT_v5.R~ new file mode 100644 index 0000000000000000000000000000000000000000..36eecb435bb97fbf58904c8ac33ad573efe33536 --- /dev/null +++ b/old/WT_v5.R~ @@ -0,0 +1,438 @@ +######################################################################################### +# Lamb's WTs with ERA-Interim # +######################################################################################### +# run it from the bash with: +# +# Rscript WT_v5.R 1984 +# +# being 1984 the year you want to classify the WTs; +# in this way you can run up to 8 jobs at the same time, each one producing its output files! +# You can also run it for a sequence of years with the syntax: +# +# Rscript WT_v5.R 1980 2014 +# +# and it will compute each year from 1980 to 2014, each one after finishing the previous one. +# +# If you want to run many years in parallel with just only 1 command, run from the bash: +# +# for y in {1980..2014}; do Rscript WT_v5.R &; done +# +# but it'd need 24 processors! In practice, it is possible to run only 4-8 years at time. 
+# +# Rscript WT_v5.R 1980 1984 +# Rscript WT_v5.R 1985 1989 +# Rscript WT_v5.R 1990 1994 +# Rscript WT_v5.R 1995 1999 +# Rscript WT_v5.R 2000 2004 +# Rscript WT_v5.R 2005 2009 +# Rscript WT_v5.R 2010 2014 +# +# Rscript WT_v5.R 1981 1984 +# Rscript WT_v5.R 1986 1989 +# Rscript WT_v5.R 1991 1994 +# Rscript WT_v5.R 1996 1999 +# +# Rscript WT_v5.R 2001 2004 +# Rscript WT_v5.R 2006 2009 +# Rscript WT_v5.R 2011 2014 + +library(s2dverification) # for the function Load() +library(ff) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf' # Load() file path +# Available reanalysis: +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +workdir="/scratch/Earth/ncortesi/RESILIENCE/WT" # working dir where to put the output maps and files +subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata" # dir where to put the intermediate daily psl data (1 file for each subarray) + +mslp.rean=ERAint #'ERAintDailyHighRes' #'ERAintDailyLowRes' # choose one of the two daily reanalysis above for loading MSLP data +mslp.rean.name <- 'ERAint' + +year.start=1980 # starting year of the MSLP daily data (from the 1st of january) +year.end=2015 # ending year of the MSLP daily data (up to the 31 of December) + +res=5.25 #4.9 # spacing between the Lamb grid points in the meridional direction (in the zonal direction, it is exactly the double of this value) + # it should be put equal to the multiple of 'psl.res' closer to 5 degrees, i.e: res = psl.res * round((5/psl.res)) + +low.res.size=7 # odd numb. 
of MSLP nearby grid points to compute the psl at each of the 16 points (both for lon and lat: the actual n.of points used is its square) + +partial.end=FALSE # put TRUE if the last year ('year.end') has not all the yearly data but stop before December the 31th; in this case, must also specify the variable below +n.days.last=334 # number of days available in the last year (used only if partial.end=TRUE) I.e: data for 2015 doesn't have December, so it has 365-31=334 days + +merge=FALSE # put TRUE if you want to concatenate all the WT classification for different years and same grid point at the end of the analysis, FALSE otherwise + +########################################################################################## + +args <- commandArgs(TRUE) + +if(length(args) == 1) year.start <- year.end <- as.integer(args[1]) +if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])} + +WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N") +radius <- (low.res.size-1)/2 # number of grid points to use as search radius for averaging the psl at each Lamb grid point + +# Load 1 day of MSLP data from the reanalysis chosen, just to detect the number of latitude and longitude points: +#MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path) +MSLP <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +n.lat <- length(MSLP$lat) # number of latitude values +n.lon <- length(MSLP$lon) # number of longitude values + +lon.pos <- ifelse(min(MSLP$lon>=0), TRUE, FALSE) # set lon.pos to TRUE if the longitude values of the reanalysis grid has no negative values, FALSE if not. + +# for each grid point of the reanalysis, compute the Lamb WT classification with Lamb grid centered on that point. +# we exclude only central points > +77 degrees lat and < -77 deg. because the Lamb grid needs 12.5 deg. north/south of the central point. +n.lat.unused.poles <- 20 # number of unused latitude values near each of the two poles (must exclude last 10 degrees N/S, so it depends on the spatial res. 
of the reanalysis) + +lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)] # latitude values used as central points +psl.res <- diff(lat.used)[1] # psl grid resolution +lat.used <- round(lat.used,3) # round psl values to the third decimal to save them in a file with a short file name (precision: ~100 m) +n.lat.used <- length(lat.used) + +lon.used <- round(MSLP$lon,3) # longitude values used as central points rounded to the third decimal (precision: ~100 m) +n.lon.used <- length(lon.used) + +n.grid.points <- length(lat.used)*length(lon.used) + +# load all MSLP data one year at time: +for(y in year.start:year.end){ + print(paste0("Year: ",y)) + n.days <- n.days.in.a.year(y) + if(partial.end==TRUE && y==year.end) n.days<-n.days.last # the last year can have a lower number of days + + # create two subdir where to put the yearly ascii and .ff output: + if(!dir.exists(file.path(workdir,paste0(y,"_txt")))) dir.create(file.path(workdir,paste0(y,"_txt"))) + if(!dir.exists(file.path(workdir,paste0(y,"_RData")))) dir.create(file.path(workdir,paste0(y,"_RData"))) + + #test <- 0 # dummy variable to create an ff file + system("rm") + test <- as.ff(0, vmode="double", file=paste0(workdir,"/WTs1_",y)) # create WTs_ + #test2 <- as.ff(0, vmode="double", file=paste0(workdir,"/WTs2_",y)) # create WTs_ + #test3 <- as.ff(0, vmode="double", file=paste0(workdir,"/WTs3_",y)) # create WTs_ + + + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs1_",y), compress=FALSE) # create WTs1_.ff and WTs_.RData # there is a limit of 47000 objects that can be saved + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs2_",y), compress=FALSE) # create WTs2_.ff and WTs_.RData # with ffsave. since we have ~100000 objects to save, + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs3_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData # we must create different output files for each year. + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs4_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData # Furthermore, saving speed decreases with the size of + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs5_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData # the .ff file. Thus, it's better not to save more than + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs6_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData # ~10000 objects in one file. For this reasons, we + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs7_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData # are creating 10 different save files. 
+ ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs8_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs9_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData + ffsave(test, file=paste0(workdir,"/",y,"_RData/WTs10_",y), compress=FALSE) # create WTs3_.ff and WTs_.RData + + close(test) + file.remove(paste0(workdir,"/WTs1_",y)) # delete the temporary ff file with test variable + + MSLP.year <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat') + gc() + + # loop over the lat/lon of the central point of the Lamb classification: + for(latc in lat.used){ + #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug + + ## first, computes the latitude coefficients: + SF1<-1/cos(latc*pi/180) + ZS1<-1/(2*cos(latc*pi/180)^2) + ZW1<-sin(latc*pi/180)/sin((latc-res)*pi/180) + ZW2<-sin(latc*pi/180)/sin((latc+res)*pi/180) + + for(lonc in lon.used){ + pos.latc <- which(lat.used==latc) + pos.lonc <- which(lon.used==lonc) + + n.point <- (pos.latc-1)*n.lon.used + pos.lonc + cat(paste0("Grid point saved: ", n.point ,"/", n.grid.points, " "), "\r") + + # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point: + lat <- lon <- c() # lat and lon of the 16 points of Lamb's grid + lat[1] <- lat[2] <- latc + 2*res + lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res + lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc + lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res + lat[15] <- lat[16] <- latc - 2*res + + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res + lon[6] <- lon[10] <- lon[14] <- lonc + 3*res + + # longitude correction for reanalysis with positive-only longitude (to stay always positive): + if(lon.pos){ + if(lonc - res < 0){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360 + } + if(lonc - 3*res < 0 && lon - res >= 0){ + lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360 + } + if(lonc + res >= 360){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360 + } + if(lonc + 3*res >= 360 && lonc + res < 360){ + lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360 + } + } + + # longitude correction for reanalysis with [-180, +180] longitude (to stay always in the same interval): + # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!! 
+ if(!lon.pos){ + if(lonc - res < -180){ + lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360 + lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360 + } + if(lonc + res >= 180){ + lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360 + lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360 + } + } + + # lat and lon position of the 16 Lamb grid points inside the MSLP reanalysis: + pos.lat <- pos.lon <- c() + for(p in 1:16){ + pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon) + pos.lat[p] <- pos[1] + pos.lon[p] <- pos[2] + } + + #WTs <- data.frame() + + # mean psl at each of the 16 grid points: + psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid + #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]] + + for(p in 1:16) { + + pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius) + + if(head(pos.lon.low.res,1) <= 0){ + ss <- which(pos.lon.low.res <= 0) + pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss]) + } + if(tail(pos.lon.low.res,1) > n.lon.used){ + ss <- which(pos.lon.low.res > n.lon.used) + pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used) + } + + pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never has to be corrected because we excluded the poles + + MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res] + psl[,p] <- apply(MSLP.low.res, 1, mean) + } + + # compute geostrophical indexes (don't round them to the 3rd decimal number as done by Trigo and Martin-Vide, not to introduce an error that change 3-4% of WTs!!!): + SF<-WF<-F<-D<-D2<-c() + + WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5]) + SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12])) + F <- (SF*SF + WF*WF)^0.5 + + if(latc < 0){ + WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spins in the opposite sense! + SF <- -SF + } + + # D e' la direzione del flusso; dato che l'angolo risultato di atan e' espresso in radianti, bisogna convertirlo in gradi moltiplicandolo per (180/pi) + # D e' l'angolo tra la direzione del vento e la sua proiezione in direzione nord-sud (SF) + D <- (180/pi) * atan(WF/SF) + + # trasforma l'angolo D iniziale in un angolo D2 sempre positivo che si calcola a partire del Nord andando in senso orario: + Q1 <- which(WF>0 & SF>0) + Q2 <- which(WF>0 & SF<=0) + Q3 <- which(WF<0 & SF<=0) # calcola quali angoli D appartengono al quadrante 1 (Q1), quali al quadrante 2, quali al 3 e quali al 4. + Q4 <- which(WF<0 & SF>0) # il primo quadrante e' in alto a destra, il secondo in basso a destra, il terzo in basso a sinistra, il quarto in alto a sinistra. 
+ + # the wind direction is usually different from the angle D where the wind is blowing to, so a correction has to be made: + D2 <- D + D2[Q1] <- D[Q1] + 180 + D2[Q2] <- D[Q2] + 360 + D2[Q4] <- D[Q4] + 180 + + # individua i WTs direzionali: + NE <- which(D2>=22.5 & D2<67.5) + E <- which(D2>=67.5 & D2<112.5) + SE <- which(D2>=112.5 & D2<157.5) + S <- which(D2>=157.5 & D2<202.5) + SW <- which(D2>=202.5 & D2<247.5) + W <- which(D2>=247.5 & D2<292.5) + NW <- which(D2>=292.5 & D2<337.5) + N <- which(D2>=337.5 | D2<22.5) + + # aggiunge una colonna a destra con il WT direzionale associato all'angolo: + D2.dir<-rep(NA,length(D2)) + D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N" + + # calcolo componenti vorticita': + ZS<-ZW<-Z<-c() + + ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11])) + ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2])) + Z <- ZS + ZW + + # Pure and hybrid WTs + cyc.pure <- which(Z>2*F) + anticyc.pure <- which(Z<(-2*F)) + hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F) + hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F) + indeter <- which(F<6 & abs(Z)<6) # tipo di tempo indeterminato (U); the choice of 6 depend on grid size and should be changed if grid res.is higher! + + # create a data frame with all the info about the WT classification: + + #years.period <- months.period <- days.period <- c() + #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y))) + #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y)) + #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y)) + years.period <- rep(y, n.days) + months.period <- seq.months.in.a.year(y)[1:n.days] + days.period <- seq.days.in.a.year(y)[1:n.days] + WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE) + + WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir) + + # colonna che identifica quali giorni sono di tipo ibrido e quali di tipo direzionale: + WT$Hyb <- rep("Directional",length(D2)) + WT$Hyb[cyc.pure] <- "Pure_C" + WT$Hyb[anticyc.pure] <- "Pure_A" + WT$Hyb[hybrid.cyc] <- "Hybrid_C" + WT$Hyb[hybrid.anticyc] <- "Hybrid_A" + + # colonna con la lista dei 26 WTs officiali: + WT$WT <- WT$D2.dir + WT$WT[cyc.pure] <- "C" + WT$WT[anticyc.pure] <- "A" + WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="") + WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="") + #WT$WT[indeter] <- "U" # comment this line not to include the Unclassified U WT + + WT$WT.num <- match(WT$WT,WTs.type)# aggiunge una colonna con la conversione dei 26 tipi di tempo a numeri da 1 a 26 + + # colonna con la lista dei 10 WTs ottenuti unendo a C e ad A gli otto WT direzionali puri; + # i WTs ibridi si aggiungono ognuno al suo tipo direzionale corrispondente (es: C.SW si aggiunge a SW, ecc.) 
+ WT$WT10 <- WT$WT.dir + WT$WT10[cyc.pure] <- "C" + WT$WT10[anticyc.pure] <- "A" + WT$WT10.num <- match(WT$WT10,WTs.type) # conversione dei 10 tipi di tempo a numeri da 1 a 9 + il tipo A che si converte nel numero 18 + + # add the other geostrophical indexes: + WT$SF=round(SF,5) + WT$WF=round(WF,5) + WT$F=round(F,5) + WT$ZS=round(ZS,5) + WT$ZW=round(ZW,5) + WT$Z=round(Z,5) + + #WTs<-rbind(WTs,WT) + + # save the full data table as a .txt to exchange it with other people: + #write.table(WT,file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + output_txt <- paste0(workdir,"/",y,"_txt/WTs_",mslp.rean.name,"_year_",y,"_lat_",latc,"_lon_",lonc,".txt") + write.table(WT,file=output_txt,row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ") + + # save only the classification with 10 WTs, to have a smaller (binary) file: + #WTs<-cbind(WT$Year, WT$Month, WT$Day, WT$WT10.num) + #WTs <- WT$WT10.num + + #a <- 1:1000 + #year <- 4000 + #WTs <- as.ff(a, vmode="double", file=paste0(workdir,"/WTs_",year)) # create WTs_3000 + #rm(WTs) + + #latc <- 70 + #lonc <- 0 + #assign(paste0("WTs_lat_",latc,"_lon_",lonc), WTs) + + # you must use this syntax when saving an Rdata with the variable name given by a string: + #do.call(ffsave, list(paste0("WTs_lat_",latc,"_lon_", lonc), file=paste0(workdir,"/WTs_",year), compress=FALSE)) # create WTs_3000.ff and WTs_3000.RData + + #b <- 1001:2000 + #WTs2 <- as.ff(b, vmode="double", file=paste0(workdir,"/WTs_",year), overwrite=TRUE) # create WTs_3000 + + #latc <- 75 + #lonc <- 0 + + #WTs <- as.ff(WT$WT10.num, vmode="double", file=paste0(workdir,"/WTs_",y), overwrite=TRUE) # create WTs_3000 + #assign(paste0("WTs_lat_",latc,"_lon_",lonc), WTs) + + if(latc < 0) { latc.char <- paste0("n", abs(latc)) } else {latc.char <- latc} + if(lonc < 0) { lonc.char <- paste0("n", abs(lonc)) } else {lonc.char <- lonc} + + + # merge the two above commands in one command only, not to introduce the variable WTs: + assign(paste0("WTs_lat_",latc.char,"_lon_",lonc.char), as.ff(WT$WT10.num, vmode="double", file=paste0(workdir,"/",y,"_RData/WTs_",y), overwrite=TRUE)) # create WTs_3000 + # you must use this syntax when saving an Rdata with the variable name given by a string: + # (it creates WTs_3000.ff and WTs_3000.RData) + + if(n.point < 10000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs1_",y), compress=FALSE, add=TRUE)) + if(n.point >= 10000 & n.point < 20000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs2_",y), compress=FALSE, add=TRUE)) + if(n.point >= 20000 & n.point < 30000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs3_",y), compress=FALSE, add=TRUE)) + if(n.point >= 30000 & n.point < 40000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs4_",y), compress=FALSE, add=TRUE)) + if(n.point >= 40000 & n.point < 50000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs5_",y), compress=FALSE, add=TRUE)) + if(n.point >= 50000 & n.point < 60000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), file=paste0(workdir,"/",y,"_RData/WTs6_",y), compress=FALSE, add=TRUE)) + if(n.point >= 60000 & n.point < 70000) do.call(ffsave, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char), 
+      #ffsave(WTs, file=paste0(workdir,"/WTs_",year), add=TRUE)
+      #save(WTs, file=paste0(workdir,"/",y,"_Rdata/10WTs_",mslp.rean,"_year_",y,"_lat_",latc,"_lon_",lonc,".RData"), compress=FALSE)
+
+      rm(WT,SF,WF,F,D,ZS,ZW,W, output_txt)
+      do.call(rm, list(paste0("WTs_lat_",latc.char,"_lon_", lonc.char)))
+      gc()
+
+    } # close for on lonc
+  } # close for on latc
+  cat("\n")
+
+} # close for on y
+
+
+#delete(paste0("WTs_lat_",latc,"_lon_",lonc))
+#do.call(rm, list(paste0("WTs_lat_",latc,"_lon_",lonc)))
+#gc()
+
+
+
+
+# merge all txt data of the same lat and lon into 1 file, to gather all the years together, then delete the yearly files:
+if(merge){
+  if(!dir.exists(file.path(workdir,"wt_txt"))) dir.create(file.path(workdir,"wt_txt"))
+
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1];  # for debugging
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+      cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r")
+
+      # encode negative coordinates as in the file names written above:
+      if(latc < 0) { latc.char <- paste0("n", abs(latc)) } else { latc.char <- latc }
+      if(lonc < 0) { lonc.char <- paste0("n", abs(lonc)) } else { lonc.char <- lonc }
+
+      for(y in year.start:year.end){
+        #print(paste0("Year: ",y))
+
+        WTs <- read.table(file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean,"_year_",y,"_lat_",latc.char,"_lon_",lonc.char,".txt"), stringsAsFactors=FALSE, header=TRUE)  # load the WTs of year y
+        if(y==year.start){
+          WTs_full_period <- WTs
+        } else {
+          WTs_full_period <- rbind(WTs_full_period, WTs)
+        }
+
+      }
+
+      write.table(WTs_full_period, file=paste0(workdir,"/wt_txt/WTs_",mslp.rean,"_",year.start,"-",year.end,"_lat_",latc.char,"_lon_",lonc.char,".txt"), row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+      rm(WTs_full_period)
+
+    }
+  }
+
+  # delete the old yearly files in the _txt directories, and the directories themselves:
+  for(y in year.start:year.end){
+    system(paste0("rm -fr ",workdir,"/",y,"_txt/"))
+  }
+
+} # close if on merge
+
+
diff --git a/old/WT_v6.R b/old/WT_v6.R
new file mode 100644
index 0000000000000000000000000000000000000000..4a10b9bc33876e7dc1b132b8ed03a0ce9af110a6
--- /dev/null
+++ b/old/WT_v6.R
@@ -0,0 +1,354 @@
+#########################################################################################
+#                          Lamb's WTs with ERA-Interim                                  #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript WT_v6.R 1984
+#
+# where 1984 is the year whose WTs you want to classify;
+# in this way you can run up to 8 jobs at the same time, each one producing its own output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript WT_v6.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, one after the other.
+#
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript WT_v6.R $y & done
+#
+# but it would need 35 processors! In practice, it is possible to run only 4-8 years at a time.
+#
+
+library(s2dverification)  # for the function Load()
+# library(ff)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')  # for the calendar functions
+#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf'  # Load() file path
+# Available reanalyses:
+ERAint <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/recon/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT"  # working dir where to put the output maps and files
+#subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata"  # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean=ERAint  #'ERAintDailyHighRes' #'ERAintDailyLowRes'  # choose one of the daily reanalyses above for loading the MSLP data
+mslp.rean.name <- 'ERAint'
+
+year.start=1985  # starting year of the MSLP daily data (from the 1st of January)
+year.end=2014    # ending year of the MSLP daily data (up to the 31st of December)
+
+res=4.9  #5.25  # spacing between the Lamb grid points in the meridional direction (in the zonal direction it is exactly twice this value);
+                # it should be set to the multiple of 'psl.res' closest to 5 degrees, i.e.: res = psl.res * round(5/psl.res)
+
+low.res.size=7  # odd number of nearby MSLP grid points used to average the psl at each of the 16 Lamb points (both for lon and lat: the actual number of points used is its square)
+
+#partial.end=FALSE  # set TRUE if the last year ('year.end') does not cover the full year but stops before December 31st; in this case, also set the variable below
+#n.days.last=334    # number of days available in the last year (used only if partial.end=TRUE). E.g.: data for 2015 lacks December, so it has 365-31=334 days
+
+merge=FALSE  # set TRUE to concatenate, at the end of the analysis, all the WT classifications of different years at the same grid point; FALSE otherwise
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+radius <- (low.res.size-1)/2  # number of grid points to use as search radius for averaging the psl at each Lamb grid point
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+#MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+MSLP <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lat <- length(MSLP$lat)  # number of latitude values
+n.lon <- length(MSLP$lon)  # number of longitude values
+
+lon.pos <- all(MSLP$lon >= 0)  # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+# for each grid point of the reanalysis, compute the Lamb WT classification with the Lamb grid centered on that point.
+# we exclude only central points north of +77 degrees and south of -77 degrees, because the Lamb grid needs 12.5 deg. north/south of the central point.
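+# Hedged arithmetic check of the polar exclusion (assuming a ~0.7-degree grid): the
+# Lamb grid reaches 2*res beyond the central point, plus the 3-point averaging radius:
+# 2*4.9 + 3*0.7   # ~11.9 degrees, matching the 17 excluded latitude steps below (17*0.7 ~ 11.9)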
+n.lat.unused.poles <- 17 #20  # number of unused latitude values near each of the two poles (the last ~12.5 degrees N/S must be excluded, so it depends on the spatial resolution of the reanalysis)
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)]  # latitude values used as central points
+psl.res <- diff(lat.used)[1]  # psl grid resolution
+lat.used <- round(lat.used,3)  # round the latitude values to the third decimal, to keep the output file names short (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3)  # longitude values used as central points, rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# load all MSLP data, one month at a time:
+for(y in year.start:year.end){
+  print(paste0("Year: ",y))
+
+  for(month in 1:12){
+    print(paste0("Month: ",month))
+
+    month_2_digits <- sprintf("%02d", month)
+    n.days <- n.days.in.a.month(month,y)
+    MSLP.year <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(y,month_2_digits,'01'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', nprocs=1)  # start from the 1st day of the month
+    gc()
+
+    # loop over the lat/lon of the central point of the Lamb classification:
+    for(latc in lat.used){
+      #latc <- lat.used[1]; lonc <- lon.used[1];  # for debugging
+
+      ## first, compute the latitude-dependent coefficients:
+      SF1 <- 1/cos(latc*pi/180)
+      ZS1 <- 1/(2*cos(latc*pi/180)^2)
+      ZW1 <- sin(latc*pi/180)/sin((latc-res)*pi/180)
+      ZW2 <- sin(latc*pi/180)/sin((latc+res)*pi/180)
+
+      for(lonc in lon.used){
+        pos.latc <- which(lat.used==latc)
+        pos.lonc <- which(lon.used==lonc)
+
+        n.point <- (pos.latc-1)*n.lon.used + pos.lonc  # number of points already computed
+        #cat(paste0("Grid point saved: ", n.point ,"/", n.grid.points, " "), "\r")
+
+        # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point:
+        lat <- lon <- c()  # lat and lon of the 16 points of Lamb's grid
+        lat[1] <- lat[2] <- latc + 2*res
+        lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res
+        lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc
+        lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res
+        lat[15] <- lat[16] <- latc - 2*res
+
+        lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res
+        lon[3] <- lon[7] <- lon[11] <- lonc - 3*res
+        lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res
+        lon[6] <- lon[10] <- lon[14] <- lonc + 3*res
+
+        if(lat[1] > 90) stop("*** Lamb grid is employing points beyond 90 degrees ***")
+
+        # longitude correction for reanalyses with positive-only longitudes (to stay always positive):
+        if(lon.pos){
+          if(lonc - res < 0){
+            lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+            lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+          }
+          if(lonc - 3*res < 0 && lonc - res >= 0){
+            lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+          }
+          if(lonc + res >= 360){
+            lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+            lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+          }
+          if(lonc + 3*res >= 360 && lonc + res < 360){
+            lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+          }
+        }
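+        # A possible simplification (hedged sketch, not used here): the case-by-case
+        # corrections above and below could be replaced by wrapping every longitude
+        # into [0,360) with modular arithmetic, e.g.:
+        # wrap.lon <- function(x) ((x %% 360) + 360) %% 360
+        # lon <- wrap.lon(lon)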
+        # longitude correction for reanalyses with longitudes in [-180, +180] (to stay always in the same interval):
+        # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!!
+        if(!lon.pos){
+          if(lonc - res < -180){
+            lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+            lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+          }
+          if(lonc + res >= 180){
+            lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+            lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+          }
+        }
+
+        # lat and lon positions of the 16 Lamb grid points inside the MSLP reanalysis:
+        pos.lat <- pos.lon <- c()
+        for(p in 1:16){
+          pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon)
+          pos.lat[p] <- pos[1]
+          pos.lon[p] <- pos[2]
+        }
+
+        #WTs <- data.frame()
+
+        # mean psl at each of the 16 grid points:
+        psl <- array(NA,c(n.days,16))  # for each day of the month, MSLP at the 16 points of Lamb's grid
+        #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]]
+
+        for(p in 1:16) {
+          pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius)
+
+          if(head(pos.lon.low.res,1) <= 0){
+            ss <- which(pos.lon.low.res <= 0)
+            pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+          }
+          if(tail(pos.lon.low.res,1) > n.lon.used){
+            ss <- which(pos.lon.low.res > n.lon.used)
+            pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+          }
+
+          pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius)  # never needs correcting, because we excluded the poles
+
+          MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]
+          psl[,p] <- apply(MSLP.low.res, 1, mean)
+        }
+
+        # compute the geostrophic indices (do not round them to the 3rd decimal as done by Trigo and Martin-Vide, to avoid introducing an error that changes 3-4% of the WTs!!!):
+        SF<-WF<-F<-D<-D2<-c()
+
+        WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5])
+        SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]))
+        F <- (SF*SF + WF*WF)^0.5  # note: F masks R's shorthand for FALSE inside this loop
+
+        if(latc < 0){
+          WF <- -WF  # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense!
+          SF <- -SF
+        }
+
+        # D is the flow direction; since atan() returns the angle in radians, convert it to degrees by multiplying by (180/pi).
+        # D is the angle between the wind direction and its projection in the north-south direction (SF):
+        D <- (180/pi) * atan(WF/SF)
+
+        # transform the initial angle D into an always-positive angle D2, measured clockwise from North:
+        Q1 <- which(WF>0 & SF>0)
+        Q2 <- which(WF>0 & SF<=0)
+        Q3 <- which(WF<0 & SF<=0)  # find which angles D fall in quadrant 1 (Q1), which in quadrant 2, which in 3 and which in 4.
+        Q4 <- which(WF<0 & SF>0)   # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left.
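+        # Hedged cross-check (not used by the script): the quadrant correction below is
+        # equivalent to computing D2 directly with atan2(), which also avoids
+        # atan(WF/SF) blowing up when SF is exactly 0:
+        # D2.alt <- ((180/pi) * atan2(WF, SF) + 180) %% 360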
+
+        # the meteorological wind direction differs from the raw angle D (which points where the wind blows to), so a quadrant correction has to be made:
+        D2 <- D
+        D2[Q1] <- D[Q1] + 180
+        D2[Q2] <- D[Q2] + 360
+        D2[Q4] <- D[Q4] + 180
+
+        # identify the directional WTs:
+        NE <- which(D2>=22.5 & D2<67.5)
+        E  <- which(D2>=67.5 & D2<112.5)
+        SE <- which(D2>=112.5 & D2<157.5)
+        S  <- which(D2>=157.5 & D2<202.5)
+        SW <- which(D2>=202.5 & D2<247.5)
+        W  <- which(D2>=247.5 & D2<292.5)
+        NW <- which(D2>=292.5 & D2<337.5)
+        N  <- which(D2>=337.5 | D2<22.5)
+
+        # add a column with the directional WT associated with each angle:
+        D2.dir <- rep(NA,length(D2))
+        D2.dir[NE]<-"NE"; D2.dir[E]<-"E"; D2.dir[SE]<-"SE"; D2.dir[S]<-"S"; D2.dir[SW]<-"SW"; D2.dir[W]<-"W"; D2.dir[NW]<-"NW"; D2.dir[N]<-"N"
+
+        # compute the vorticity components:
+        ZS<-ZW<-Z<-c()
+
+        ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11]))
+        ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2]))
+        Z <- ZS + ZW
+
+        # Pure and hybrid WTs
+        cyc.pure <- which(Z>2*F)
+        anticyc.pure <- which(Z<(-2*F))
+        hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F)
+        hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F)
+        indeter <- which(F<6 & abs(Z)<6)  # indeterminate weather type (U); the threshold of 6 depends on the grid size and should be changed if the grid resolution is higher!
+
+        # create a data frame with all the info about the WT classification:
+
+        #years.period <- months.period <- days.period <- c()
+        #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+        #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+        #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+        years.period <- rep(y, n.days)
+        months.period <- rep(month, n.days)  #seq.months.in.a.year(y)[1:n.days]
+        days.period <- 1:n.days.in.a.month(month,y)  #seq.days.in.a.year(y)[1:n.days]
+        WT <- data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+        WT$Year<-as.numeric(WT$Year); WT$Month<-as.numeric(WT$Month); WT$Day<-as.numeric(WT$Day); WT$Direction<-as.numeric(WT$Direction); WT$WT.dir<-as.character(WT$WT.dir)
+
+        # column identifying which days are of hybrid type and which of directional type:
+        WT$Hyb <- rep("Directional",length(D2))
+        WT$Hyb[cyc.pure] <- "Pure_C"
+        WT$Hyb[anticyc.pure] <- "Pure_A"
+        WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+        WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+
+        # column with the list of the 26 official WTs:
+        WT$WT <- WT$WT.dir
+        WT$WT[cyc.pure] <- "C"
+        WT$WT[anticyc.pure] <- "A"
+        WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+        WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+        #WT$WT[indeter] <- "U"  # uncomment this line to include the Unclassified (U) WT
+
+        WT$WT.num <- match(WT$WT,WTs.type)  # add a column converting the 26 weather types to numbers from 1 to 26
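+        # Small illustration of the numeric coding above (a check, not executed):
+        # match("C.SW", WTs.type)   # returns 14, the position of "C.SW" in WTs.type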
+        # column with the list of the 10 WTs obtained by adding C and A to the eight pure directional WTs;
+        # each hybrid WT is merged into its corresponding directional type (e.g. C.SW is counted as SW, etc.):
+        WT$WT10 <- WT$WT.dir
+        WT$WT10[cyc.pure] <- "C"
+        WT$WT10[anticyc.pure] <- "A"
+        WT$WT10.num <- match(WT$WT10,WTs.type)  # convert the 10 weather types to numbers from 1 to 9, plus type A, which maps to number 18
+
+        # add the other geostrophic indices:
+        WT$SF=round(SF,5)
+        WT$WF=round(WF,5)
+        WT$F=round(F,5)
+        WT$ZS=round(ZS,5)
+        WT$ZW=round(ZW,5)
+        WT$Z=round(Z,5)
+
+        #WTs<-rbind(WTs,WT)
+        assign(paste0("WT","_year_",y,"_lat_",latc,"_lon_",lonc), WT)
+
+        rm(WT,SF,WF,F,D,ZS,ZW,W)
+
+        gc()
+
+      } # close for on lonc
+    } # close for on latc
+    #cat("\n")
+
+    # save all the classifications of a single year and month in a single .RData file
+    # (note: save.image() stores the whole workspace, including MSLP.year):
+    output <- paste0(workdir,"/WTs_",mslp.rean.name,"_year_",y,"_month_",month,".RData")
+    save.image(file=output)
+
+    gc()
+
+  } # close for on month
+  gc()
+
+} # close for on y
+
+
+# a <- read.table(output_list, stringsAsFactors=FALSE, header=TRUE)
+
+
+## # merge all txt data of the same lat and lon into 1 file, to gather all the years together, then delete the yearly files:
+## if(merge){
+##   if(!dir.exists(file.path(workdir,"wt_txt"))) dir.create(file.path(workdir,"wt_txt"))
+
+##   for(latc in lat.used){
+##     #latc <- lat.used[1]; lonc <- lon.used[1];  # for debugging
+
+##     for(lonc in lon.used){
+##       pos.latc <- which(lat.used==latc)
+##       pos.lonc <- which(lon.used==lonc)
+##       cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r")
+
+##       for(y in year.start:year.end){
+##         #print(paste0("Year: ",y))
+
+##         WTs <- read.table(file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean.name,"_year_",y,"_lat_",latc.char,"_lon_",lonc.char,".txt"), stringsAsFactors=FALSE, header=TRUE)  # load the WTs of year y
+##         if(y==year.start){
+##           WTs_full_period <- WTs
+##         } else {
+##           WTs_full_period <- rbind(WTs_full_period, WTs)
+##         }
+
+##       }
+
+##       write.table(WTs_full_period, file=paste0(workdir,"/wt_txt/WTs_",mslp.rean.name,"_",year.start,"-",year.end,"_lat_",latc.char,"_lon_",lonc.char,".txt"), row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+##       rm(WTs_full_period)
+
+##     }
+##   }
+
+##   # delete the old yearly files in the _txt directories, and the directories themselves:
+##   for(y in year.start:year.end){
+##     system(paste0("rm -fr ",workdir,"/",y,"_txt/"))
+##   }
+
+## } # close if on merge
+
+
diff --git a/old/WT_v7.R b/old/WT_v7.R
new file mode 100644
index 0000000000000000000000000000000000000000..ecbcc74d4a8efee0317abe47fc6dd8f9e3ce4d5c
--- /dev/null
+++ b/old/WT_v7.R
@@ -0,0 +1,360 @@
+#########################################################################################
+#                          Lamb's WTs with ERA-Interim                                  #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript WT_v7.R 1984
+#
+# where 1984 is the year whose WTs you want to classify;
+# in this way you can run up to 8 jobs at the same time, each one producing its own output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript WT_v7.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, one after the other.
+#
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript WT_v7.R $y & done
+#
+# but it would need 35 processors! In practice, it is possible to run only 4-8 years at a time.
+#
+
+library(s2dverification)  # for the function Load()
+# library(ff)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')  # for the calendar functions
+#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf'  # Load() file path
+# Available reanalyses:
+ERAint <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/recon/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT"  # working dir where to put the output maps and files
+#subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata"  # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean=ERAint  #'ERAintDailyHighRes' #'ERAintDailyLowRes'  # choose one of the daily reanalyses above for loading the MSLP data
+mslp.rean.name <- 'ERAint'
+
+year.start=1985  # starting year of the MSLP daily data (from the 1st of January)
+year.end=2014    # ending year of the MSLP daily data (up to the 31st of December)
+
+res=4.9  #5.25  # spacing between the Lamb grid points in the meridional direction (in the zonal direction it is exactly twice this value);
+                # it should be set to the multiple of 'psl.res' closest to 5 degrees, i.e.: res = psl.res * round(5/psl.res)
+
+low.res.size=7  # odd number of nearby MSLP grid points used to average the psl at each of the 16 Lamb points (both for lon and lat: the actual number of points used is its square)
+
+#partial.end=FALSE  # set TRUE if the last year ('year.end') does not cover the full year but stops before December 31st; in this case, also set the variable below
+#n.days.last=334    # number of days available in the last year (used only if partial.end=TRUE). E.g.: data for 2015 lacks December, so it has 365-31=334 days
+
+merge=FALSE  # set TRUE to concatenate, at the end of the analysis, all the WT classifications of different years at the same grid point; FALSE otherwise
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+radius <- (low.res.size-1)/2  # number of grid points to use as search radius for averaging the psl at each Lamb grid point
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+#MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+MSLP <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lat <- length(MSLP$lat)  # number of latitude values
+n.lon <- length(MSLP$lon)  # number of longitude values
+
+lon.pos <- all(MSLP$lon >= 0)  # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+# for each grid point of the reanalysis, compute the Lamb WT classification with the Lamb grid centered on that point.
+# we exclude only central points north of +77 degrees and south of -77 degrees, because the Lamb grid needs 12.5 deg. north/south of the central point.
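+# Worked example of the res formula above (a check, not executed): with the psl.res
+# computed just below, a 0.7-degree grid gives res = 0.7 * round(5/0.7) = 0.7 * 7 = 4.9
+# (the value set above), while a 0.75-degree grid gives 0.75 * 7 = 5.25 (the commented alternative).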
+n.lat.unused.poles <- 17 #20  # number of unused latitude values near each of the two poles (the last ~12.5 degrees N/S must be excluded, so it depends on the spatial resolution of the reanalysis)
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)]  # latitude values used as central points
+psl.res <- diff(lat.used)[1]  # psl grid resolution
+lat.used <- round(lat.used,3)  # round the latitude values to the third decimal, to keep the output file names short (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3)  # longitude values used as central points, rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# load all MSLP data, one year at a time:
+for(y in year.start:year.end){
+  print(paste0("Year: ",y))
+
+  #for(month in 1:12){
+  #  print(paste0("Month: ",month))
+
+  #  month_2_digits <- ifelse(month<10, paste0("0",month), month)
+  n.days <- n.days.in.a.year(y)  #n.days.in.a.month(month,y)
+
+  # load the data of year y:
+  MSLP.year <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(y,'01','01'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', nprocs=1)
+  gc()
+
+  # loop over the lat/lon of the central point of the Lamb classification:
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1];  # for debugging
+
+    ## first, compute the latitude-dependent coefficients:
+    SF1 <- 1/cos(latc*pi/180)
+    ZS1 <- 1/(2*cos(latc*pi/180)^2)
+    ZW1 <- sin(latc*pi/180)/sin((latc-res)*pi/180)
+    ZW2 <- sin(latc*pi/180)/sin((latc+res)*pi/180)
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+
+      n.point <- (pos.latc-1)*n.lon.used + pos.lonc  # number of points already computed
+      #cat(paste0("Grid point saved: ", n.point ,"/", n.grid.points, " "), "\r")
+
+      # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point:
+      lat <- lon <- c()  # lat and lon of the 16 points of Lamb's grid
+      lat[1] <- lat[2] <- latc + 2*res
+      lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res
+      lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc
+      lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res
+      lat[15] <- lat[16] <- latc - 2*res
+
+      lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res
+      lon[3] <- lon[7] <- lon[11] <- lonc - 3*res
+      lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res
+      lon[6] <- lon[10] <- lon[14] <- lonc + 3*res
+
+      if(lat[1] > 90) stop("*** Lamb grid is employing points beyond 90 degrees ***")
+
+      # longitude correction for reanalyses with positive-only longitudes (to stay always positive):
+      if(lon.pos){
+        if(lonc - res < 0){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc - 3*res < 0 && lonc - res >= 0){
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 360){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+        if(lonc + 3*res >= 360 && lonc + res < 360){
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
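+      # Worked example of the latitude coefficients computed above (a check with
+      # illustrative values, assuming latc = 45 and res = 4.9):
+      # 1/cos(45*pi/180)                     # SF1 ~ 1.414
+      # 1/(2*cos(45*pi/180)^2)               # ZS1 = 1.0
+      # sin(45*pi/180)/sin((45-4.9)*pi/180)  # ZW1 ~ 1.098
+      # sin(45*pi/180)/sin((45+4.9)*pi/180)  # ZW2 ~ 0.924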
+      # longitude correction for reanalyses with longitudes in [-180, +180] (to stay always in the same interval):
+      # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!!
+      if(!lon.pos){
+        if(lonc - res < -180){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 180){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
+      # lat and lon positions of the 16 Lamb grid points inside the MSLP reanalysis:
+      pos.lat <- pos.lon <- c()
+      for(p in 1:16){
+        pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon)
+        pos.lat[p] <- pos[1]
+        pos.lon[p] <- pos[2]
+      }
+
+      #WTs <- data.frame()
+
+      # mean psl at each of the 16 grid points:
+      psl <- array(NA,c(n.days,16))  # for each day of the year, MSLP at the 16 points of Lamb's grid
+      #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]]
+
+      for(p in 1:16) {
+        pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius)
+
+        if(head(pos.lon.low.res,1) <= 0){
+          ss <- which(pos.lon.low.res <= 0)
+          pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+        }
+        if(tail(pos.lon.low.res,1) > n.lon.used){
+          ss <- which(pos.lon.low.res > n.lon.used)
+          pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+        }
+
+        pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius)  # never needs correcting, because we excluded the poles
+
+        MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]
+        psl[,p] <- apply(MSLP.low.res, 1, mean)
+      }
+
+      # compute the geostrophic indices (do not round them to the 3rd decimal as done by Trigo and Martin-Vide, to avoid introducing an error that changes 3-4% of the WTs!!!):
+      SF<-WF<-F<-D<-D2<-c()
+
+      WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5])
+      SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]))
+      F <- (SF*SF + WF*WF)^0.5  # note: F masks R's shorthand for FALSE inside this loop
+
+      if(latc < 0){
+        WF <- -WF  # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense!
+        SF <- -SF
+      }
+
+      # D is the flow direction; since atan() returns the angle in radians, convert it to degrees by multiplying by (180/pi).
+      # D is the angle between the wind direction and its projection in the north-south direction (SF):
+      D <- (180/pi) * atan(WF/SF)
+
+      # transform the initial angle D into an always-positive angle D2, measured clockwise from North:
+      Q1 <- which(WF>0 & SF>0)
+      Q2 <- which(WF>0 & SF<=0)
+      Q3 <- which(WF<0 & SF<=0)  # find which angles D fall in quadrant 1 (Q1), which in quadrant 2, which in 3 and which in 4.
+      Q4 <- which(WF<0 & SF>0)   # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left.
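+      # Toy check of the quadrant logic (illustrative values): for WF = 1 and SF = 1
+      # the day falls in Q1, D = 45, and the correction below gives D2 = 225, i.e. a
+      # south-westerly flow:
+      # (180/pi)*atan(1/1) + 180   # 225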
+
+      # the meteorological wind direction differs from the raw angle D (which points where the wind blows to), so a quadrant correction has to be made:
+      D2 <- D
+      D2[Q1] <- D[Q1] + 180
+      D2[Q2] <- D[Q2] + 360
+      D2[Q4] <- D[Q4] + 180
+
+      # identify the directional WTs:
+      NE <- which(D2>=22.5 & D2<67.5)
+      E  <- which(D2>=67.5 & D2<112.5)
+      SE <- which(D2>=112.5 & D2<157.5)
+      S  <- which(D2>=157.5 & D2<202.5)
+      SW <- which(D2>=202.5 & D2<247.5)
+      W  <- which(D2>=247.5 & D2<292.5)
+      NW <- which(D2>=292.5 & D2<337.5)
+      N  <- which(D2>=337.5 | D2<22.5)
+
+      # add a column with the directional WT associated with each angle:
+      D2.dir <- rep(NA,length(D2))
+      D2.dir[NE]<-"NE"; D2.dir[E]<-"E"; D2.dir[SE]<-"SE"; D2.dir[S]<-"S"; D2.dir[SW]<-"SW"; D2.dir[W]<-"W"; D2.dir[NW]<-"NW"; D2.dir[N]<-"N"
+
+      # compute the vorticity components:
+      ZS<-ZW<-Z<-c()
+
+      ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11]))
+      ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2]))
+      Z <- ZS + ZW
+
+      # Pure and hybrid WTs
+      cyc.pure <- which(Z>2*F)
+      anticyc.pure <- which(Z<(-2*F))
+      hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F)
+      hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F)
+      indeter <- which(F<6 & abs(Z)<6)  # indeterminate weather type (U); the threshold of 6 depends on the grid size and should be changed if the grid resolution is higher!
+
+      # create a data frame with all the info about the WT classification:
+
+      #years.period <- months.period <- days.period <- c()
+      #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+      #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+      #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+      years.period <- rep(y, n.days)
+      months.period <- seq.months.in.a.year(y)[1:n.days]
+      days.period <- seq.days.in.a.year(y)[1:n.days]
+      WT <- data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+      WT$Year<-as.numeric(WT$Year); WT$Month<-as.numeric(WT$Month); WT$Day<-as.numeric(WT$Day); WT$Direction<-as.numeric(WT$Direction); WT$WT.dir<-as.character(WT$WT.dir)
+
+      # column identifying which days are of hybrid type and which of directional type:
+      WT$Hyb <- rep("Directional",length(D2))
+      WT$Hyb[cyc.pure] <- "Pure_C"
+      WT$Hyb[anticyc.pure] <- "Pure_A"
+      WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+      WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+
+      # column with the list of the 26 official WTs:
+      WT$WT <- WT$WT.dir
+      WT$WT[cyc.pure] <- "C"
+      WT$WT[anticyc.pure] <- "A"
+      WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+      WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+      #WT$WT[indeter] <- "U"  # uncomment this line to include the Unclassified (U) WT
+
+      WT$WT.num <- match(WT$WT,WTs.type)  # add a column converting the 26 weather types to numbers from 1 to 26
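+      # Hedged usage sketch: once WT$WT10 is filled just below, the frequency of each
+      # weather type at this grid point can be checked with, e.g.:
+      # table(WT$WT10)   # days per 10-type class for year y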
+      # column with the list of the 10 WTs obtained by adding C and A to the eight pure directional WTs;
+      # each hybrid WT is merged into its corresponding directional type (e.g. C.SW is counted as SW, etc.):
+      WT$WT10 <- WT$WT.dir
+      WT$WT10[cyc.pure] <- "C"
+      WT$WT10[anticyc.pure] <- "A"
+      WT$WT10.num <- match(WT$WT10,WTs.type)  # convert the 10 weather types to numbers from 1 to 9, plus type A, which maps to number 18
+
+      # add the other geostrophic indices:
+      WT$SF=round(SF,5)
+      WT$WF=round(WF,5)
+      WT$F=round(F,5)
+      WT$ZS=round(ZS,5)
+      WT$ZW=round(ZW,5)
+      WT$Z=round(Z,5)
+
+      #WTs<-rbind(WTs,WT)
+      #assign(paste0("WT","_year_",y,"_lat_",latc,"_lon_",lonc), WT)
+
+      if(!dir.exists(file.path(workdir,"txt/ERAint",y))) dir.create(file.path(workdir,"txt/ERAint",y), recursive=TRUE)  # make sure the yearly output dir exists
+      write.table(WT,paste0(workdir,"/txt/ERAint/",y,"/WT_year_",y,"_lat_",latc,"_lon_",lonc,".txt"))
+
+      rm(WT,SF,WF,F,D,ZS,ZW,W)
+
+      gc()
+
+    } # close for on lonc
+  } # close for on latc
+  #cat("\n")
+
+  rm(MSLP.year)
+
+  # save all the classifications of a single year in a single .RData file:
+  #output <- paste0(workdir,"/WTs_",mslp.rean.name,"_year_",y,".RData")
+  #save.image(file=output)
+
+  gc()
+
+  #} # close for on month
+  #gc()
+
+} # close for on y
+
+
+# a <- read.table(output_list, stringsAsFactors=FALSE, header=TRUE)
+
+
+## # merge all txt data of the same lat and lon into 1 file, to gather all the years together, then delete the yearly files:
+## if(merge){
+##   if(!dir.exists(file.path(workdir,"wt_txt"))) dir.create(file.path(workdir,"wt_txt"))
+
+##   for(latc in lat.used){
+##     #latc <- lat.used[1]; lonc <- lon.used[1];  # for debugging
+
+##     for(lonc in lon.used){
+##       pos.latc <- which(lat.used==latc)
+##       pos.lonc <- which(lon.used==lonc)
+##       cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r")
+
+##       for(y in year.start:year.end){
+##         #print(paste0("Year: ",y))
+
+##         WTs <- read.table(file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean.name,"_year_",y,"_lat_",latc.char,"_lon_",lonc.char,".txt"), stringsAsFactors=FALSE, header=TRUE)  # load the WTs of year y
+##         if(y==year.start){
+##           WTs_full_period <- WTs
+##         } else {
+##           WTs_full_period <- rbind(WTs_full_period, WTs)
+##         }
+
+##       }
+
+##       write.table(WTs_full_period, file=paste0(workdir,"/wt_txt/WTs_",mslp.rean.name,"_",year.start,"-",year.end,"_lat_",latc.char,"_lon_",lonc.char,".txt"), row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+##       rm(WTs_full_period)
+
+##     }
+##   }
+
+##   # delete the old yearly files in the _txt directories, and the directories themselves:
+##   for(y in year.start:year.end){
+##     system(paste0("rm -fr ",workdir,"/",y,"_txt/"))
+##   }
+
+## } # close if on merge
+
+
diff --git a/old/WT_v7.R~ b/old/WT_v7.R~
new file mode 100644
index 0000000000000000000000000000000000000000..a818296feaf423ad5dd19c47fc3e93f0c57aac60
--- /dev/null
+++ b/old/WT_v7.R~
@@ -0,0 +1,358 @@
+#########################################################################################
+#                          Lamb's WTs with ERA-Interim                                  #
+#########################################################################################
+# run it from the bash with:
+#
+# Rscript WT_v7.R 1984
+#
+# where 1984 is the year whose WTs you want to classify;
+# in this way you can run up to 8 jobs at the same time, each one producing its own output files!
+# You can also run it for a sequence of years with the syntax:
+#
+# Rscript WT_v7.R 1980 2014
+#
+# and it will compute each year from 1980 to 2014, one after the other.
+#
+# If you want to run many years in parallel with just one command, run from the bash:
+#
+# for y in {1980..2014}; do Rscript WT_v7.R $y & done
+#
+# but it would need 35 processors! In practice, it is possible to run only 4-8 years at a time.
+#
+
+library(s2dverification)  # for the function Load()
+# library(ff)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')  # for the calendar functions
+#Load.path <- '/home/Earth/ncortesi/Downloads/scripts/IC3.conf'  # Load() file path
+# Available reanalyses:
+ERAint <- list(path = '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/recon/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir="/scratch/Earth/ncortesi/RESILIENCE/WT"  # working dir where to put the output maps and files
+#subdatadir="/scratch/Earth/ncortesi/RESILIENCE/WT_subdata"  # dir where to put the intermediate daily psl data (1 file for each subarray)
+
+mslp.rean=ERAint  #'ERAintDailyHighRes' #'ERAintDailyLowRes'  # choose one of the daily reanalyses above for loading the MSLP data
+mslp.rean.name <- 'ERAint'
+
+year.start=1985  # starting year of the MSLP daily data (from the 1st of January)
+year.end=2014    # ending year of the MSLP daily data (up to the 31st of December)
+
+res=4.9  #5.25  # spacing between the Lamb grid points in the meridional direction (in the zonal direction it is exactly twice this value);
+                # it should be set to the multiple of 'psl.res' closest to 5 degrees, i.e.: res = psl.res * round(5/psl.res)
+
+low.res.size=7  # odd number of nearby MSLP grid points used to average the psl at each of the 16 Lamb points (both for lon and lat: the actual number of points used is its square)
+
+#partial.end=FALSE  # set TRUE if the last year ('year.end') does not cover the full year but stops before December 31st; in this case, also set the variable below
+#n.days.last=334    # number of days available in the last year (used only if partial.end=TRUE). E.g.: data for 2015 lacks December, so it has 365-31=334 days
+
+merge=FALSE  # set TRUE to concatenate, at the end of the analysis, all the WT classifications of different years at the same grid point; FALSE otherwise
+
+##########################################################################################
+
+args <- commandArgs(TRUE)
+
+if(length(args) == 1) year.start <- year.end <- as.integer(args[1])
+if(length(args) == 2) {year.start <- as.integer(args[1]); year.end <- as.integer(args[2])}
+
+WTs.type<-c("NE","E","SE","S","SW","W","NW","N","C","C.NE","C.E","C.SE","C.S","C.SW","C.W","C.NW","C.N","A","A.NE","A.E","A.SE","A.S","A.SW","A.W","A.NW","A.N")
+radius <- (low.res.size-1)/2  # number of grid points to use as search radius for averaging the psl at each Lamb grid point
+
+# Load 1 day of MSLP data from the chosen reanalysis, just to detect the number of latitude and longitude points:
+#MSLP <- Load('psl', NULL, mslp.rean, paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', configfile = Load.path)
+MSLP <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lat <- length(MSLP$lat)  # number of latitude values
+n.lon <- length(MSLP$lon)  # number of longitude values
+
+lon.pos <- all(MSLP$lon >= 0)  # TRUE if the longitude values of the reanalysis grid have no negative values, FALSE otherwise
+
+# for each grid point of the reanalysis, compute the Lamb WT classification with the Lamb grid centered on that point.
+# we exclude only central points north of +77 degrees and south of -77 degrees, because the Lamb grid needs 12.5 deg. north/south of the central point.
+n.lat.unused.poles <- 17 #20  # number of unused latitude values near each of the two poles (the last ~12.5 degrees N/S must be excluded, so it depends on the spatial resolution of the reanalysis)
+
+lat.used <- MSLP$lat[-c(1:n.lat.unused.poles,(n.lat-n.lat.unused.poles):n.lat)]  # latitude values used as central points
+psl.res <- diff(lat.used)[1]  # psl grid resolution
+lat.used <- round(lat.used,3)  # round the latitude values to the third decimal, to keep the output file names short (precision: ~100 m)
+n.lat.used <- length(lat.used)
+
+lon.used <- round(MSLP$lon,3)  # longitude values used as central points, rounded to the third decimal (precision: ~100 m)
+n.lon.used <- length(lon.used)
+
+n.grid.points <- length(lat.used)*length(lon.used)
+
+# load all MSLP data, one year at a time:
+for(y in year.start:year.end){
+  print(paste0("Year: ",y))
+
+  #for(month in 1:12){
+  #  print(paste0("Month: ",month))
+
+  #  month_2_digits <- ifelse(month<10, paste0("0",month), month)
+  n.days <- n.days.in.a.year(y)  #n.days.in.a.month(month,y)
+
+  # load the data of year y:
+  MSLP.year <- Load(var = 'psl', exp = NULL, obs = list(mslp.rean), paste0(y,'01','01'), storefreq = 'daily', leadtimemax = n.days, output = 'lonlat', nprocs=1)
+  gc()
+
+  # loop over the lat/lon of the central point of the Lamb classification:
+  for(latc in lat.used){
+    #latc <- lat.used[1]; lonc <- lon.used[1];  # for debugging
+
+    ## first, compute the latitude-dependent coefficients:
+    SF1 <- 1/cos(latc*pi/180)
+    ZS1 <- 1/(2*cos(latc*pi/180)^2)
+    ZW1 <- sin(latc*pi/180)/sin((latc-res)*pi/180)
+    ZW2 <- sin(latc*pi/180)/sin((latc+res)*pi/180)
+
+    for(lonc in lon.used){
+      pos.latc <- which(lat.used==latc)
+      pos.lonc <- which(lon.used==lonc)
+
+      n.point <- (pos.latc-1)*n.lon.used + pos.lonc  # number of points already computed
+      #cat(paste0("Grid point saved: ", n.point ,"/", n.grid.points, " "), "\r")
+
+      # assign the lat and lon values of the 16 grid points of the Lamb grid centered over the central point:
+      lat <- lon <- c()  # lat and lon of the 16 points of Lamb's grid
+      lat[1] <- lat[2] <- latc + 2*res
+      lat[3] <- lat[4] <- lat[5] <- lat[6] <- latc + res
+      lat[7] <- lat[8] <- lat[9] <- lat[10] <- latc
+      lat[11] <- lat[12] <- lat[13] <- lat[14] <- latc - res
+      lat[15] <- lat[16] <- latc - 2*res
+
+      lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res
+      lon[3] <- lon[7] <- lon[11] <- lonc - 3*res
+      lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res
+      lon[6] <- lon[10] <- lon[14] <- lonc + 3*res
+
+      if(lat[1] > 90) stop("*** Lamb grid is employing points beyond 90 degrees ***")
+
+      # longitude correction for reanalyses with positive-only longitudes (to stay always positive):
+      if(lon.pos){
+        if(lonc - res < 0){
+          lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc - 3*res < 0 && lonc - res >= 0){
+          lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+        }
+        if(lonc + res >= 360){
+          lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+        if(lonc + 3*res >= 360 && lonc + res < 360){
+          lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+        }
+      }
+
+      # longitude correction for reanalyses with longitudes in [-180, +180] (to stay always in the same interval):
+      # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!!
+
+   # longitude correction for reanalyses with [-180, +180] longitudes (to stay always within the same interval):
+   # MUST STILL UPDATE FOR CASE lonc + 15 >= 360 !!!
+   if(!lon.pos){
+    if(lonc - res < -180){
+     lon[1] <- lon[4] <- lon[8] <- lon[12] <- lon[15] <- lonc - res + 360
+     lon[3] <- lon[7] <- lon[11] <- lonc - 3*res + 360
+    }
+    if(lonc + res >= 180){
+     lon[2] <- lon[5] <- lon[9] <- lon[13] <- lon[16] <- lonc + res - 360
+     lon[6] <- lon[10] <- lon[14] <- lonc + 3*res - 360
+    }
+   }
+
+   # lat and lon positions of the 16 Lamb grid points inside the MSLP reanalysis:
+   pos.lat <- pos.lon <- c()
+   for(p in 1:16){
+    pos <- nearest(lat[p], lon[p], MSLP$lat, MSLP$lon)
+    pos.lat[p] <- pos[1]
+    pos.lon[p] <- pos[2]
+   }
+
+   #WTs <- data.frame()
+
+   # mean psl at each of the 16 grid points:
+   psl <- array(NA,c(n.days,16)) # for each day of the year, MSLP of the 16 points of Lamb's grid
+   #for(p in 1:16) psl[,p] <- MSLP.year$obs[1,1,1,,pos.lat[p],pos.lon[p]]
+
+   for(p in 1:16) {
+    pos.lon.low.res <- (pos.lon[p]-radius):(pos.lon[p]+radius)
+
+    if(head(pos.lon.low.res,1) <= 0){
+     ss <- which(pos.lon.low.res <= 0)
+     pos.lon.low.res <- c(pos.lon.low.res[ss] + n.lon.used, pos.lon.low.res[-ss])
+    }
+    if(tail(pos.lon.low.res,1) > n.lon.used){
+     ss <- which(pos.lon.low.res > n.lon.used)
+     pos.lon.low.res <- c(pos.lon.low.res[-ss], pos.lon.low.res[ss]-n.lon.used)
+    }
+
+    pos.lat.low.res <- (pos.lat[p]-radius):(pos.lat[p]+radius) # it never has to be corrected because we excluded the poles
+
+    MSLP.low.res <- MSLP.year$obs[1,1,1,,pos.lat.low.res,pos.lon.low.res]
+    psl[,p] <- apply(MSLP.low.res, 1, mean)
+   }
+
+   # compute the geostrophic indices (do not round them to the 3rd decimal as done by Trigo and Martin-Vide, to avoid introducing an error that changes 3-4% of the WTs!!!):
+   SF<-WF<-F<-D<-D2<-c()
+
+   WF <- 0.5*(psl[,12]+psl[,13]) - 0.5*(psl[,4]+psl[,5])
+   SF <- SF1*(0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]))
+   F <- (SF*SF + WF*WF)^0.5
+
+   if(latc < 0){
+    WF <- -WF # because in the southern hemisphere, cyclones and anticyclones spin in the opposite sense!
+    SF <- -SF
+   }
+
+   # D is the flow direction; since the angle returned by atan is expressed in radians, it must be converted to degrees by multiplying by (180/pi).
+   # D is the angle between the wind direction and its projection onto the north-south direction (SF).
+   D <- (180/pi) * atan(WF/SF)
+
+   # transform the initial angle D into an always-positive angle D2, measured clockwise starting from North:
+   Q1 <- which(WF>0 & SF>0)
+   Q2 <- which(WF>0 & SF<=0)
+   Q3 <- which(WF<0 & SF<=0) # find which angles D belong to quadrant 1 (Q1), which to quadrant 2, which to 3 and which to 4.
+   Q4 <- which(WF<0 & SF>0)  # the first quadrant is top-right, the second bottom-right, the third bottom-left, the fourth top-left.
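+
+   # worked example of the convention applied just below: a day with WF = 10 and SF = 10 (westerly
+   # and southerly flow of equal strength) gives D = atan(10/10)*(180/pi) = 45 degrees; it falls in
+   # Q1, so D2 = 45 + 180 = 225 degrees, i.e. a south-westerly (SW) flow in the 202.5-247.5 sector.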
+
+   # the wind direction is usually different from the angle D the wind is blowing towards, so a correction has to be made:
+   D2 <- D
+   D2[Q1] <- D[Q1] + 180
+   D2[Q2] <- D[Q2] + 360
+   D2[Q4] <- D[Q4] + 180
+
+   # identify the directional WTs:
+   NE <- which(D2>=22.5 & D2<67.5)
+   E <- which(D2>=67.5 & D2<112.5)
+   SE <- which(D2>=112.5 & D2<157.5)
+   S <- which(D2>=157.5 & D2<202.5)
+   SW <- which(D2>=202.5 & D2<247.5)
+   W <- which(D2>=247.5 & D2<292.5)
+   NW <- which(D2>=292.5 & D2<337.5)
+   N <- which(D2>=337.5 | D2<22.5)
+
+   # add a column on the right with the directional WT associated with each angle:
+   D2.dir<-rep(NA,length(D2))
+   D2.dir[NE]<-"NE";D2.dir[E]<-"E";D2.dir[SE]<-"SE";D2.dir[S]<-"S";D2.dir[SW]<-"SW";D2.dir[W]<-"W";D2.dir[NW]<-"NW";D2.dir[N]<-"N"
+
+   # compute the vorticity components:
+   ZS<-ZW<-Z<-c()
+
+   ZS <- ZS1*(0.25 * (psl[,6]+2*psl[,10]+psl[,14]) - 0.25 * (psl[,5]+2*psl[,9]+psl[,13]) - 0.25 * (psl[,4]+2*psl[,8]+psl[,12]) + 0.25 * (psl[,3]+2*psl[,7]+psl[,11]))
+   ZW <- ZW1 * (0.5*(psl[,15]+psl[,16]) - 0.5*(psl[,8]+psl[,9])) - ZW2 * (0.5*(psl[,8]+psl[,9]) - 0.5*(psl[,1]+psl[,2]))
+   Z <- ZS + ZW
+
+   # Pure and hybrid WTs:
+   cyc.pure <- which(Z>2*F)
+   anticyc.pure <- which(Z<(-2*F))
+   hybrid.cyc <- which(Z>0 & abs(Z)<2*F & abs(Z)>F)
+   hybrid.anticyc <- which(Z<0 & abs(Z)<2*F & abs(Z)>F)
+   indeter <- which(F<6 & abs(Z)<6) # unclassified weather type (U); the threshold of 6 depends on the grid size and should be changed if the grid resolution is higher!
+
+   # create a data frame with all the info about the WT classification:
+
+   #years.period <- months.period <- days.period <- c()
+   #for(y in year.start:year.end) years.period <- c(years.period, rep(y, n.days.in.a.year(y)))
+   #for(y in year.start:year.end) months.period <- c(months.period, seq.months.in.a.year(y))
+   #for(y in year.start:year.end) days.period <- c(days.period, seq.days.in.a.year(y))
+   years.period <- rep(y, n.days)
+   months.period <- seq.months.in.a.year(y)[1:n.days]
+   days.period <- seq.days.in.a.year(y)[1:n.days]
+   WT<-data.frame(Year=years.period, Month=months.period, Day=days.period, Direction=round(D2,3), WT.dir=D2.dir, stringsAsFactors=FALSE)
+
+   WT$Year<-as.numeric(WT$Year);WT$Month<-as.numeric(WT$Month);WT$Day<-as.numeric(WT$Day);WT$Direction<-as.numeric(WT$Direction);WT$WT.dir<-as.character(WT$WT.dir)
+
+   # column identifying which days are of hybrid type and which of directional type:
+   WT$Hyb <- rep("Directional",length(D2))
+   WT$Hyb[cyc.pure] <- "Pure_C"
+   WT$Hyb[anticyc.pure] <- "Pure_A"
+   WT$Hyb[hybrid.cyc] <- "Hybrid_C"
+   WT$Hyb[hybrid.anticyc] <- "Hybrid_A"
+
+   # column with the list of the 26 official WTs:
+   WT$WT <- WT$WT.dir
+   WT$WT[cyc.pure] <- "C"
+   WT$WT[anticyc.pure] <- "A"
+   WT$WT[hybrid.cyc] <- paste("C.",WT$WT.dir[hybrid.cyc],sep="")
+   WT$WT[hybrid.anticyc] <- paste("A.", WT$WT.dir[hybrid.anticyc],sep="")
+   #WT$WT[indeter] <- "U" # comment this line not to include the Unclassified U WT
+
+   WT$WT.num <- match(WT$WT,WTs.type) # add a column converting the 26 weather types to the numbers 1 to 26
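+
+   # e.g.: a pure westerly day gets WT = "W" and WT.num = 6, while a hybrid anticyclonic
+   # south-westerly day gets WT = "A.SW" and WT.num = 23 (positions in the WTs.type vector above).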
+
+   # column with the list of the 10 WTs obtained by adding to C and to A the eight pure directional WTs;
+   # each hybrid WT is merged into its corresponding directional type (e.g.: C.SW is merged into SW, etc.)
+   WT$WT10 <- WT$WT.dir
+   WT$WT10[cyc.pure] <- "C"
+   WT$WT10[anticyc.pure] <- "A"
+   WT$WT10.num <- match(WT$WT10,WTs.type) # convert the 10 weather types to the numbers 1 to 9, plus the A type, which maps to number 18
+
+   # add the other geostrophic indices:
+   WT$SF=round(SF,5)
+   WT$WF=round(WF,5)
+   WT$F=round(F,5)
+   WT$ZS=round(ZS,5)
+   WT$ZW=round(ZW,5)
+   WT$Z=round(Z,5)
+
+   #WTs<-rbind(WTs,WT)
+   assign(paste0("WT","_year_",y,"_lat_",latc,"_lon_",lonc), WT)
+
+   rm(WT,SF,WF,F,D,ZS,ZW,Z)
+
+   gc()
+
+  } # close for on lonc
+ } # close for on latc
+ #cat("\n")
+
+ rm(MSLP.year)
+
+ # save all the classifications of a single year in a single .RData file:
+ output <- paste0(workdir,"/WTs_",mslp.rean.name,"_year_",y,".RData")
+ save.image(file=output)
+
+ gc()
+
+ #} # close for on month
+ #gc()
+
+} # close for on y
+
+
+# a <- read.table(output_list, stringsAsFactors=FALSE, header=TRUE)
+
+
+## # merge all the txt data of the same lat and lon into 1 file, to have all the years together, and delete the yearly files:
+## if(merge){
+##  if(!dir.exists(file.path(workdir,paste0("wt_txt")))) dir.create(file.path(workdir,paste0("wt_txt")))
+
+##  for(latc in lat.used){
+##   #latc <- lat.used[1]; lonc <- lon.used[1]; # for the debug
+
+##   for(lonc in lon.used){
+##    pos.latc <- which(lat.used==latc)
+##    pos.lonc <- which(lon.used==lonc)
+##    cat(paste0("Merging classification at grid point: ",(pos.latc-1)*n.lon.used+pos.lonc ,"/", n.grid.points, " "), "\r")
+
+##    for(y in year.start:year.end){
+##     #print(paste0("Year: ",y))
+
+##     WTs <- read.table(file=paste0(workdir,"/",y,"_txt/WTs_",mslp.rean.name,"_year_",y,"_lat_",latc.char,"_lon_",lonc.char,".txt"),stringsAsFactors=FALSE, header=TRUE) # Load WTs
+##     if(y==year.start){
+##      WTs_full_period <- WTs
+##     } else {
+##      WTs_full_period <- rbind(WTs_full_period, WTs)
+##     }
+
+##    }
+
+##    write.table(WTs_full_period, file=paste0(workdir,"/wt_txt/WTs_",mslp.rean.name,"_",year.start,"-",year.end,"_lat_",latc.char,"_lon_",lonc.char,".txt"),row.names=FALSE, col.names=TRUE, quote=FALSE, sep=" ")
+##    rm(WTs_full_period)
+
+##   }
+##  }
+
+##  # delete the old yearly files in the _txt directories and the directories themselves:
+##  for(y in year.start:year.end){
+##   system(paste0("rm -fr ",workdir,"/",y,"_txt/"))
+##  }
+
+## } # close if on merge
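+
+# a minimal sketch of how one of the yearly output files can be inspected afterwards (each .RData
+# file stores one WT_year_<y>_lat_<lat>_lon_<lon> data frame per central point; the lat/lon values
+# in the object name below are hypothetical):
+# load(paste0(workdir,"/WTs_ERAint_year_1985.RData"))
+# wt <- get("WT_year_1985_lat_45.105_lon_0")
+# table(wt$WT10) # frequency of the 10 aggregated WTs at that point in 1985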
diff --git a/old/backup/CMIP5_MultiModel_4Torres.R b/old/backup/CMIP5_MultiModel_4Torres.R
new file mode 100644
index 0000000000000000000000000000000000000000..15db988a0a6d66374198a6136f283bab71cd75eb
--- /dev/null
+++ b/old/backup/CMIP5_MultiModel_4Torres.R
@@ -0,0 +1,2552 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: to generate a multimodel ensemble of CMIP5 simulations for a set of wine indices defined by the user, and to estimate and visualize their projected climate change under two emission scenarios.
+# I/O: input: CMIP5 simulations. Output: maps of measured and estimated wine indices.
+# Assumption: none, the user can set all the parameters.
+# Branch: multimodels
+
+rm(list=ls())
+#gc()
+#source("~/.Rprofile")
+
+#save.image(file=paste0(outputdir,"/Bodega_base.RData"))
+
+rean=TRUE # put FALSE if you want to load the reanalysis data from file
+ensemble=TRUE # put TRUE to also load the multimodel computation, if you have already analyzed all the runs once
+if(rean==FALSE && ensemble==FALSE) load("/home/Earth/ncortesi/Downloads/Bodegas_Torre/Bodega_base.RData") # stored reanalysis data
+if(rean==FALSE && ensemble==TRUE) load("/home/Earth/ncortesi/Downloads/Bodegas_Torre/models/Ensemble/Bodega_base_ensemble.RData")
+
+########### User-defined Variables ############################################
+
+model.names=c("BCC-CSM1-1","BCC-CSM1-1-m","BNU-ESM","CCSM4","CNRM-CM5","CSIRO-Mk3-6-0","EC-EARTH","FIO-ESM","GFDL-CM3","GISS-E2-H","IPSL-CM5a-LR","IPSL-CM5A-MR","MIROC5","MPI-ESM-LR","MPI-ESM-MR")
+
+# number of runs for each model (warning: the run starting with r0 is not a run but the land/sea mask, so the runs start with r1):
+# each run must consist of 1 file for temp and 1 for prec, starting with the year 1850 for the historical runs or 2006 for the projections, and can end with any year.
+# beware that if you change the size of these matrices, or the number of indices, the code that calculates the ensemble must also be changed!
+
+n.max.models=15 # maximum number of possible models (each one must have a subdir in the path.data dir)
+n.max.runs=5 # maximum number of possible runs for each model (for both scenarios) # beware that if you change this value, the code that calculates the ensemble must also be changed!
+
+# matrix with the number of the runs for each model used for scenario 2.6: (runs eliminated: mod1 r1, mod3 r1, mod6 r2, mod8 r2)
+n.runs.rcp26<-matrix(0,n.max.runs,n.max.models)
+n.runs.rcp26[1,]=c(1,1,1,0,0,2,8, 2,1,1,1,1,1,1,1) # its first row stores the number of the first run associated with each model: model #1 has run1, model #4 has no run associated, model #6 has run2
+n.runs.rcp26[2,]=c(0,0,0,0,0,0,12,0,0,0,2,0,2,2,0) # its second row stores the number of the eventual second run associated with each model (e.g.: model #11 has run2 (r2i1p1) associated)
+n.runs.rcp26[3,]=c(0,0,0,0,0,0,0, 0,0,0,3,0,3,3,0) # its third row shows the number of the eventual third run associated with each model
+n.runs.rcp26[4,]=c(0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0) # its fourth row shows the number of the eventual fourth run associated with each model
+n.runs.rcp26[5,]=c(0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0) # its fifth row shows the number of the eventual fifth run associated with each model
+
+# matrix with the number of the runs for each model used for scenario 8.5: (runs eliminated: mod3 r1)
+n.runs.rcp85<-matrix(0,n.max.runs,n.max.models)
+n.runs.rcp85[1,]=c(0,0,1,2,0,0,0,0,0,0,1,1,1,1,1) # its first row stores the number of the first run associated with each model
+n.runs.rcp85[2,]=c(0,0,0,3,0,0,0,0,0,0,2,0,2,2,0) # its second row stores the number of the eventual second run associated with each model
+n.runs.rcp85[3,]=c(0,0,0,4,0,0,0,0,0,0,3,0,3,3,0) # its third row shows the number of the eventual third run associated with each model
+n.runs.rcp85[4,]=c(0,0,0,5,0,0,0,0,0,0,0,0,0,0,0)
+n.runs.rcp85[5,]=c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
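+
+# a quick, optional sanity check of the two matrices above (an illustrative sketch, not part of
+# the analysis): print, for each model, the run numbers that will be processed per scenario
+for(m in 1:n.max.models) cat(sprintf("model %2d (%s): rcp26 runs = %s | rcp85 runs = %s\n", m, model.names[m],
+  paste(n.runs.rcp26[n.runs.rcp26[,m]!=0, m], collapse=","), paste(n.runs.rcp85[n.runs.rcp85[,m]!=0, m], collapse=",")))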
+
+# Root directory with all the input data from the CMIP5 models (historical and the two scenarios RCP 8.5 and 2.6), one subfolder for each model:
+# (the subfolder name must be an increasing number from 1 to the total number of models)
+path.data='/home/Earth/ncortesi/Downloads/Bodegas_Torre/models'
+
+# output dir for the reanalysis data (you must create it):
+outputdir="/home/Earth/ncortesi/Downloads/Bodegas_Torre/reanalisis"
+
+# names of the variables to open to calculate the climate indices:
+vars=c("tas","pr") # at present there must be exactly two values
+
+n.indices=6 # total number of indices to evaluate, currently 6:
+
+# 1. "Temperatura Media Anual"
+# 2. "Temperatura media del ciclo vegetativo (Octubre a Abril)"
+# 3. "Indice de Winkler para el periodo vegetativo (Octubre a Abril)"
+# 4. "Winter Severity Index"
+# 5. "Precipitacion Anual"
+# 6. "Precipitacion durante el ciclo vegetativo (Octubre a Abril)"
+
+# notice that both the tas and pr historical data must begin in January and in the same year (the exact year is not important, since it is detected by looking at the file name)
+first.year.hist=1985 # The first year of the historical data (both for reanalysis and models). It is really 1986, but we need to start from October of the previous year to calculate some indices
+last.year.hist=2005 # The last year of the historical data.
+
+# notice that both the tas and pr data for scenarios RCP 2.6 AND 8.5 must begin in January and in the same year (the exact year is not important, since it is detected by looking at the file name)
+
+first.year.rcp=2030 # The first year of the projections. It is really 2031, but we need to start from October of the previous year to calculate some indices
+last.year.rcp=2050 # The last year of the projections. Note that all years must obviously fall inside the time period of the data files (1850-2005 for the historical runs and 2006-2099 for the projections)
+
+# Location of the parcelas:
+parcelas.names=c("Coyhaique","Ñirehuao","La Tapera","Lago Verde")
+parcelas.lat=c(-45.59,-45.27,-44.65,-44.24)
+parcelas.lon=c(-72.06,-71.72,-71.67,-71.85)
+
+# Location of the grid of parcelas:
+grid.nlon=7 # number of points in the longitudinal direction
+grid.nlat=15 # number of points in the latitudinal direction
+grid.res.lon=0.703125 # resolution in degrees in the longitudinal direction
+grid.res.lat=0.70175 # resolution in degrees in the latitudinal direction
+grid.corner=c(-47.3683028,-73.82812) # lat/lon coordinates of the lower left corner of the grid of parcelas
+
+# Coordinates of the box of our Patagonia area:
+pata.latmax=-43.5
+pata.latmin=-46
+pata.lonmin=285.5
+pata.lonmax=290
+
+# Extended coordinates of the box of our Patagonia area (the area shown in the maps):
+bigpata.latmax=pata.latmax+10
+bigpata.latmin=pata.latmin-10
+bigpata.lonmax=pata.lonmax+2
+bigpata.lonmin=pata.lonmin-2
+
+dmax.idw=300 # maximum distance in km to calculate the correlations or the IDW
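+
+# nearest() is provided by the author's helper functions and is not shown in this diff; a minimal
+# sketch of the behaviour the code below relies on -- assuming it returns c(lat.index, lon.index)
+# of the grid point closest to the requested coordinates -- would be:
+# nearest.sketch <- function(lat0, lon0, lat.grid, lon.grid)
+#  c(which.min(abs(lat.grid - lat0)), which.min(abs(lon.grid - lon0)))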
+my.palette2<-c("lightblue","white",brewer.pal(9, "YlOrRd")) #rev(brewer.pal(11, "RdYlBu")) +my.palette3<-brewer.pal(9, "Greens") # only 9 colors +my.palette4<-rev(brewer.pal(9, "BuPu")) +my.palette5<-brewer.pal(9, "Blues") +my.palette6<-brewer.pal(9, "Blues") +my.palette<-list(my.palette1,my.palette2,my.palette3,my.palette4,my.palette5,my.palette6) + +# Intervalos: +my.brks1<-seq(-4,30,2) #c(-60,-40,-20,seq(-10,30,0.5),40) +my.brks2<-seq(-4,30,2) #c(-60,-40,-20,seq(-10,30,0.5),40) +my.brks3<-c(0,1,seq(200,3000,200),6000) +my.brks4<-c(-100,seq(0,10,0.5),40) +my.brks5<-c(seq(0,2000,10),4000,8000) +my.brks6<-c(seq(0,2000,10),4000,8000) +my.brks<-list(my.brks1,my.brks2,my.brks3,my.brks4,my.brks5,my.brks6) + +# Units of measure: +my.units<-c("Grados Celsius","Grados Celsius","Grados Celsius","Grados Celsius","mm","mm") + +# skip some labels not to show all labels in some plot: +my.subsampleg=c(1,1,1,1,5,5) + +# the same but for the leyend added with colorBar: +my.subsamplegbar=c(1,1,1,1,5,5) + +# show the line with 1200 degree days for the Winkler idex: +my.brks.dos<-list(NULL,NULL,1200,-15,NULL,NULL) + +# change the color of the line to separate areas where grape can grow and where it can't: +my.contours.col<-list(NULL,NULL,"black","red",NULL,NULL) + +# Escala de colores para los mapas de tendencias +my.palette1.trend<-brewer.pal(9,"YlOrBr") +my.palette2.trend<-brewer.pal(9,"YlOrBr") +my.palette3.trend<-brewer.pal(9,"Greens") # Winkler's Index +my.palette4.trend<-brewer.pal(9,"YlOrBr") # rev(brewer.pal(11,"RdBu")) +my.palette5.trend<-rev(brewer.pal(11,"RdBu")) +my.palette6.trend<-rev(brewer.pal(11,"RdBu")) +my.palette.trend<-list(my.palette1.trend,my.palette2.trend,my.palette3.trend,my.palette4.trend,my.palette5.trend,my.palette6.trend) + +# Intervalos para los mapas de tendencias: +my.brks1.trend<-seq(0,2,0.1) +my.brks2.trend<-seq(0,2,0.1) +my.brks3.trend<-c(seq(0,300,30),400,500) +my.brks4.trend<-seq(0,1.4,0.1) +my.brks5.trend<-seq(-200,200,40) +my.brks6.trend<-seq(-200,200,40) +my.brks.trend<-list(my.brks1.trend,my.brks2.trend,my.brks3.trend,my.brks4.trend,my.brks5.trend,my.brks6.trend) + +################ Derived variables ############################################################################ +#n.models.hist<-length(models.hist) # total number of historical models +#n.models.rcp26<-length(models.rcp26) # total number of RCP 2.6 models +#n.models.rcp85<-length(models.rcp85) # total number of RCP 8.5 models + +n.vars<-length(vars) # total number of model variables + +index.name<-c("Temperatura Media Anual", + "Temperatura Media (Octubre a Abril)", + "Indice de Winkler (Octubre a Abril)", + "Winter Severity Index", + "Precipitacion Anual", + "Precipitacion (Octubre a Abril)") + +index.name.short<-c("Temp.Media Anual", + "Temp.Media Oct-Abr", + "Indice de Winkler", + "Winter Sev.Index", + "Prec.Anual", + "Prec.Oct-Abr") + +index.name0<-c("Temperatura_Media_Anual", + "Temperatura_Media_Vegetativa", + "Indice_de_Winkler", + "Winter_Severity_Index", + "Precipitacion_Anual", + "Precipitacion_Vegetativa") + +index.name.very.short<-c("Temp_med","Temp_veget","Winkler","W_Severity","Prec_anual","Prec_veget") + +n.parcelas<-length(parcelas.names) # number of parcelas + + + +n.months.hist<-((last.year.hist-first.year.hist+1)*12) # number of historical months to extract starting from the first year +n.years.hist=n.months.hist/12 + +n.months.rcp<-((last.year.rcp-first.year.rcp+1)*12) # number of future months to extract starting from the first year for both rcp26 and rcp85 (usually it is the same 
+n.years.rcp=n.months.rcp/12
+
+n.years.trans<-first.year.rcp-last.year.hist-1 #24 # number of years in the transition period, from the year following the last historical year to the year before the first rcp year
+n.years.tot <- n.years.hist + n.years.trans + n.years.rcp # total number of years
+
+# grid of parcelas:
+n.grid.points<-grid.nlon*grid.nlat
+grid.lat.list<-rev(seq(grid.corner[1],grid.corner[1]+(grid.nlat-1)*grid.res.lat,by=grid.res.lat)) # ex: -44,-45,-46,-47
+grid.lon.list<-seq(grid.corner[2],grid.corner[2]+(grid.nlon-1)*grid.res.lon,by=grid.res.lon) # ex: -73,-72,-71
+grid.lat<-rep(grid.lat.list,grid.nlon) # ex: -44,-45,-46,-47, -44,-45,-46,-47, -44,-45,-46,-47
+grid.lon<-rep(grid.lon.list,each=grid.nlat) # ex: -73,-73,-73,-73,-72,-72,-72,-72,-71,-71,-71,-71
+
+########### Load the observed data of temp and prec from Reanalysis and calculate and save all indices and maps ########
+if(rean==TRUE){
+
+var1<-Load(var='tas',exp=NULL,obs = 'ERAint' ,sdates=paste0(first.year.hist,"0101"), storefreq = "monthly", sampleperiod = 1,output = "lonlat", method = "conservative")#, grid='r512x256')
+var2<-Load(var='prlr',exp=NULL,obs = 'ERAint' ,sdates=paste0(first.year.hist,"0101"), storefreq = "monthly", sampleperiod = 1, output = "lonlat", method = "conservative")#, grid='r512x256')
+
+temp.rean<-var1$obs[1,1,1,1:n.months.hist,,] # var1 format: 1,1,1,month,lat,lon; temperature format: month,lat,lon
+prec.rean<-var2$obs[1,1,1,1:n.months.hist,,]
+#temp.rea2<-apply(temp.rea,2,rev)
+
+#lat.rea<-sort(var1$lat)
+#lon.rea<-sort(var1$lon)
+lat.rean<-var1$lat # note that the latitude goes from higher to lower values; but even without inverting it, the function PlotEquiMap draws the latitude correctly.
+lon.rean<-var1$lon
+n.lat.rean<-length(lat.rean)
+n.lon.rean<-length(lon.rean)
+
+temp.rean<-temp.rean-273.15
+prec.rean<-prec.rean*60*60*24*1000 # convert the prec. from m/s to mm per day; we must still convert from mm/day to mm/month:
+
+month31=c(1,3,5,7,8,10,12) # months with 31 days
+my.months31<-rep(month31,n.years.hist)+rep(0:(n.years.hist-1)*12,each=length(month31)) # select only months with 31 days
+prec.rean[my.months31,,]<-prec.rean[my.months31,,]*31 # multiply the prec of these months by 31
+
+month30=c(4,6,9,11) # months with 30 days
+my.months30<-rep(month30,n.years.hist)+rep(0:(n.years.hist-1)*12,each=length(month30)) # select only months with 30 days
+prec.rean[my.months30,,]<-prec.rean[my.months30,,]*30 # multiply the prec of these months by 30
+
+month28=2 # months with 28 days
+my.months28<-rep(month28,n.years.hist)+rep(0:(n.years.hist-1)*12,each=length(month28)) # select only months with 28 days
+prec.rean[my.months28,,]<-prec.rean[my.months28,,]*28.25 # multiply the prec of February by 28.25 (to account for leap years)
+
+rm(var1,var2)
+
+# Calculate the climate indices for the reanalysis and for each run, and save the results:
+index.rean<-indices(temp.rean,prec.rean,n.months.hist) # For Reanalysis
+save(index.rean,file=paste0(outputdir,"/indices.RData"))
+
+# Save global maps with the indices:
+for(my.index in 1:n.indices){
+ png(filename=paste0(outputdir,"/Worldmap_indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+ layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+ #layout.show(2)
+ my.toptitle<-paste(index.name[my.index],"(ERAI)")
+ PlotEquiMap2(index.rean[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+   brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+   filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+   contours=list(NULL,NULL,t(index.rean[[my.index]]),t(index.rean[[my.index]]),NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=FALSE)
+ ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=1)
+ dev.off()
+}
+
+# the same charts as above, but only for Patagonia and plotting the location of the four parcelas:
+my.lat.pos<-which(lat.rean < pata.latmax & lat.rean > pata.latmin)
+my.lon.pos<-which(lon.rean < pata.lonmax & lon.rean > pata.lonmin)
+my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+my.lon<-lon.rean[my.lon.pos]
+
+for(my.index in 1:n.indices){
+ png(filename=paste0(outputdir,"/Patagonia_indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+ layout(matrix(c(1,1,1,1,1,1,2), 1, 7, byrow = TRUE)) #layout.show(2) to see the window distribution
+ par(oma=c(1,0,0,0)) # add 1 cm of white space to the bottom border
+ my.toptitle<-paste(index.name[my.index],"(ERAI)")
+ PlotEquiMap2(index.rean[[my.index]][my.lat.pos,my.lon.pos],my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+   brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+   filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+   contours=list(NULL,NULL,t(index.rean[[my.index]][my.lat.pos,my.lon.pos]),t(index.rean[[my.index]][my.lat.pos,my.lon.pos]),NULL,NULL)[[my.index]],
+   brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F)
+
+ my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+ map.cities(my.parcelas,pch=3,cex=1,col=c("purple","blue","red","black")) # add the 4 points of the 4 parcelas
+ ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=T,cex=1,subsampleg=my.subsamplegbar[[my.index]])
+
+ dev.off()
+}
+
+# the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+my.lon<-lon.rean[my.lon.pos]
+
+for(my.index in 1:n.indices){
+ png(filename=paste0(outputdir,"/BigPatagonia_indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+ layout(matrix(c(1,1,1,1,2), 1, 5, byrow = TRUE))
+ #layout.show(2)
+ par(oma=c(1,0,0,0.5)) # add 1 cm of white space to the bottom border
+ my.toptitle<-paste(index.name.short[my.index],"(ERAI)")
+ PlotEquiMap2(index.rean[[my.index]][my.lat.pos,my.lon.pos],my.lon,my.lat,toptitle=my.toptitle,sizetit = 1,
+   brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+   filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+   contours=list(NULL,NULL,t(index.rean[[my.index]][my.lat.pos,my.lon.pos]),t(index.rean[[my.index]][my.lat.pos,my.lon.pos]),NULL,NULL)[[my.index]],
+   brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F)
+
+ my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+ map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+ ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=T,cex=1.5,subsampleg=my.subsamplegbar[[my.index]])
+
+ dev.off()
+}
+
+
+# assign each parcela to a grid point of the reanalysis:
+p.pos.rean<-list()
+for (p in 1:n.parcelas) p.pos.rean[[p]]=nearest(parcelas.lat[p],parcelas.lon[p],lat.rean,lon.rean)
+
+# save the index values for each of the parcelas:
+parcelas.indices.rean<-array(NA,c(n.parcelas,n.indices))
+for (p in 1:n.parcelas) { for (i in 1:n.indices) { parcelas.indices.rean[p,i]<-index.rean[[i]][p.pos.rean[[p]][1],p.pos.rean[[p]][2]] } }
+save(parcelas.indices.rean,file=paste0(outputdir,"/indices_parcelas.RData"))
+
+# save the yearly values of the indices for each of the parcelas:
+index.rean.year<-array(NA,c(n.indices,n.years.hist,n.parcelas),dimnames=list(paste0("Indice",1:n.indices),first.year.hist:last.year.hist,paste0("Parcela",1:n.parcelas)))
+
+for (p in 1:n.parcelas) {
+ for (year in 1:(n.years.hist)) {
+  begin.month = 1 + 12*(year-1)
+  my.months<-begin.month:n.months.hist
+
+  # create a 3D array with 2 dummy dimensions corresponding to lat and lon, to be able to use the indexX functions that need an array:
+  tt.rean<-array(temp.rean[my.months,p.pos.rean[[p]][1],p.pos.rean[[p]][2]],c(length(my.months),1,1))
+  pp.rean<-array(prec.rean[my.months,p.pos.rean[[p]][1],p.pos.rean[[p]][2]],c(length(my.months),1,1))
+
+  index.rean.year[1,year,p]<-index1(tt.rean,12) # starting from 1985, select only one year of data at a time
+  if (year < n.years.hist) index.rean.year[2,year,p]<-index2(tt.rean,24) # starting from 1985, select 2 years of data at a time
+  if (year < n.years.hist) index.rean.year[3,year,p]<-index3(tt.rean,24) # starting from 1985, select 2 years of data at a time
+  index.rean.year[4,year,p]<-index4(tt.rean,12) # starting from 1985, select only one year of data at a time
+  index.rean.year[5,year,p]<-index5(pp.rean,12) # starting from 1985, select only one year of data at a time
+  if (year < n.years.hist) index.rean.year[6,year,p]<-index6(pp.rean,24) # starting from 1985, select 2 years of data at a time
+ }
+}
+
+#plot(1985:2005,index.rean.year[1,,1],type="o")
+save(index.rean.year,file=paste0(outputdir,"/indices_parcelas_anuales.RData"))
+
+# same as above, but for the grid of parcelas:
+grid.pos.rean<-list()
+for (p in 1:n.grid.points) grid.pos.rean[[p]]=nearest(grid.lat[p],grid.lon[p],lat.rean,lon.rean)
+
+# save the index values for each of the grid points:
+grid.indices.rean<-array(NA,c(n.grid.points,n.indices))
+for (p in 1:n.grid.points) { for (i in 1:n.indices) { grid.indices.rean[p,i]<-index.rean[[i]][grid.pos.rean[[p]][1],grid.pos.rean[[p]][2]] } }
+save(grid.indices.rean,file=paste0(outputdir,"/indices_grid_parcelas.RData"))
+
+#save.image("/home/ncortesi/Downloads/Bodegas_Torre/Bodega_base.RData")
+
+} # close if on rean
+
+
+##########################################################################################################################################################
+############### Load, calculate indices, interpolate, visualize and save all the runs for the historical period and for the two scenarios ################
+##########################################################################################################################################################
+
+print("Loading model data...")
+#mod=1 # number of the model to load in the path.data dir
+#run=1 # number of the run to load in the path.data/mod dir
+rcp26<-rcp85<-FALSE
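+
+# expected layout under path.data, as used in the loop below (one numbered subfolder per model,
+# one r<run> folder per run, each with a 'historical' and a '2.6' subfolder -- and presumably an
+# '8.5' one -- holding the NetCDF files; the file names here are hypothetical examples of the
+# <var>_<startyear...>.nc pattern parsed below):
+#  <path.data>/11/r2/historical/tas_185001-200512.nc
+#  <path.data>/11/r2/2.6/pr_200601-209912.nc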
+for(my.mod in 1:n.max.models){
+ for(my.run in 1:n.max.runs){
+  mod=my.mod
+  if(n.runs.rcp26[my.run,my.mod]!=0 || n.runs.rcp85[my.run,my.mod]!=0){
+   if(n.runs.rcp26[my.run,my.mod]!=0 && n.runs.rcp85[my.run,my.mod]==0){
+    run=n.runs.rcp26[my.run,my.mod]
+    rcp26=TRUE # TRUE if the selected model and run has all the data for scenario RCP 2.6
+    rcp85=FALSE # TRUE if the selected model and run has all the data for scenario RCP 8.5
+   }
+   if(n.runs.rcp26[my.run,my.mod]==0 && n.runs.rcp85[my.run,my.mod]!=0){
+    run=n.runs.rcp85[my.run,my.mod]
+    rcp26=FALSE
+    rcp85=TRUE
+   }
+   if(n.runs.rcp26[my.run,my.mod]!=0 && n.runs.rcp85[my.run,my.mod]!=0){
+    run=n.runs.rcp26[my.run,my.mod] # in this case it doesn't matter which of the two run numbers we select, because they are the same; so we select the one for rcp 2.6
+    rcp26=TRUE
+    rcp85=TRUE
+   }
+
+#print(my.mod);print(my.run);print(n.runs.rcp26[my.run,my.mod]);print(n.runs.rcp85[my.run,my.mod]);print(rcp26);print(rcp85) # for the debug
+
+########### Load the historical data of temp and prec from each run of CMIP5 and calculate and save all indices and maps ########
+
+print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
+print(paste0(" Analyzing Model ",mod," Run ",run))
+print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
+
+# import the selected run of the selected model:
+
+#my.bar<-tkProgressBar(title = "Analyzing data...", label = "Please wait",min = 0, max = 100, initial = 0, width = 500)
+
+  #setTkProgressBar(my.bar, run*100/n.runs.hist[mod], label="Please wait")
+  cat("Processing Historical...")
+
+  my.dir.hist<-paste0(path.data,"/",mod,"/r",run,"/historical")
+  my.files<-list.files(path=my.dir.hist,pattern="\\.nc$") # select all files inside the input data directory
+  if(length(my.files)==0) print(paste0("There are no files in the input directory ",my.dir.hist))
+  my.files.splitted<-strsplit(my.files,"_") # separate the suffix from the time range in the file name
+  first.year.hist.file <- as.integer(substr(my.files.splitted[[1]][2],1,4)) # the first year of data in ALL the historical files for tas and prec (usually 1850)
+  shift.hist<-12*(first.year.hist-first.year.hist.file) # number of months from the beginning of the data file (year 1850) to the beginning of the historical period used in the study (1985 in our case)
+
+  # load the data for the first variable (i.e.: tas):
+  for(f in 1:length(my.files.splitted)) if(my.files.splitted[[f]][1]==vars[1]) my.file.var1<-my.files[f]
+
+  my.path1<-paste0(my.dir.hist,"/",my.file.var1) # the full path of the first file in the directory (there is only one)
+  my.ncdf1<-open.ncdf(my.path1) # open the netcdf file
+  var1<-get.var.ncdf(my.ncdf1,varid=vars[1]) # var1 format: [lon,lat,month]
+
+  # load the data for the second variable (i.e.: pr):
+  for(f in 1:length(my.files.splitted)) if(my.files.splitted[[f]][1]==vars[2]) my.file.var2<-my.files[f]
+  my.path2<-paste0(my.dir.hist,"/",my.file.var2) # the full path of the second file in the directory (there is only one)
+  my.ncdf2<-open.ncdf(my.path2) # open the netcdf file
+  var2<-get.var.ncdf(my.ncdf2,varid=vars[2]) # var2 format: [lon,lat,month]
+
+  # Get the lat and lon lists:
+  lat.hist<-get.var.ncdf(my.ncdf1,varid="latitude") # <- get the latitude (it is the same both for temp and prec data and for all runs of the same model)
+  lon.hist<-get.var.ncdf(my.ncdf1,varid="longitude") # <- get the longitude
+  n.lat.hist<-length(lat.hist)
+  n.lon.hist<-length(lon.hist)
+
+  close(my.ncdf1)
+  close(my.ncdf2)
+
+  # array where to put all the data of the run (format: [lon,lat,month]):
+  temp.hist<-prec.hist<-array(NA,c(n.lon.hist,n.lat.hist,n.months.hist))
+
+  # select only the months in the chosen historical period, because we know that the first year of the loaded data always starts in the same year:
+  temp.hist[,,]<-var1[,,(1+shift.hist):(n.months.hist+shift.hist)]
+  prec.hist[,,]<-var2[,,(1+shift.hist):(n.months.hist+shift.hist)]
+
+  rm(var1,var2)
+
+  # check if the first dimension is the longitude or the latitude:
+  if(dim(temp.hist)[1]>dim(temp.hist)[2]) models.firstdim="lon"
+  if(dim(temp.hist)[1]<dim(temp.hist)[2]) models.firstdim="lat"
+
+  # calculate the climate indices for this run:
+  index.hist<-indices(temp.hist,prec.hist,n.months.hist)
+
+  # charts for Patagonia, plotting the location of the four parcelas:
+  my.lat.pos<-which(lat.hist < pata.latmax & lat.hist > pata.latmin)
+  my.lon.pos<-which(lon.hist < pata.lonmax & lon.hist > pata.lonmin)
+  if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.hist < pata.latmax+2 & lat.hist > pata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+  if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.hist < pata.lonmax+2 & lon.hist > pata.lonmin-2)
+  my.lat<-lat.hist[my.lat.pos] # select only Patagonia pixels
+  my.lon<-lon.hist[my.lon.pos]
+
+  for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+   png(filename=paste0(path.data,"/",mod,"/r",run,"/historical/Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+   my.toptitle<-paste0(index.name[my.index]," (Historical Model ",mod," Run ",run,")")
+
+   # to solve some bugs of PlotEquiMap:
+   if(models.firstdim=="lat") {plot.data<-index.hist[[my.index]][my.lon.pos,my.lat.pos] } else {plot.data<-index.hist[[my.index]][my.lat.pos,my.lon.pos]}
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   #par(oma=c(0,4,0,0)) #par(mfrow=c(2,1)) #x11(width=4,height=.8)
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=1,col=c("purple","blue","red","black")) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+  }
+
+  # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+  my.lat.pos<-which(lat.hist < bigpata.latmax & lat.hist > bigpata.latmin)
+  my.lon.pos<-which(lon.hist < bigpata.lonmax & lon.hist > bigpata.lonmin)
+  if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.hist < bigpata.latmax+2 & lat.hist > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+  if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.hist < bigpata.lonmax+2 & lon.hist > bigpata.lonmin-2)
+  my.lat<-lat.hist[my.lat.pos] # select only Patagonia pixels
+  my.lon<-lon.hist[my.lon.pos]
+
+  for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   png(filename=paste0(path.data,"/",mod,"/r",run,"/historical/BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+   my.toptitle<-paste0(index.name.short[my.index]," (Historical Model ",mod," Run ",run,")")
+
+   # to solve some bugs of PlotEquiMap:
+   if(models.firstdim=="lat") {plot.data<-index.hist[[my.index]][my.lon.pos,my.lat.pos] } else {plot.data<-index.hist[[my.index]][my.lat.pos,my.lon.pos]}
+
+   layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+   #layout.show(2)
+   par(oma=c(1,0,0,0.2))
+   #par(mfrow=c(2,1))
+   #x11(width=4,height=.8)
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,subsampleg=my.subsamplegbar[[my.index]])
+   dev.off()
+  }
+
+  # assign each parcela to a grid point of the historical model:
+  p.pos.hist<-list()
+  for (p in 1:n.parcelas) p.pos.hist[[p]]=nearest(parcelas.lat[p],parcelas.lon[p],lat.hist,lon.hist)
+
+  # save the index values for each of the parcelas and the number of different grid points:
+  parcelas.indices.hist<-array(NA,c(n.parcelas,n.indices))
+  for (p in 1:n.parcelas) { for (i in 1:n.indices) { parcelas.indices.hist[p,i]<-index.hist[[i]][p.pos.hist[[p]][1],p.pos.hist[[p]][2]] } }
+  #save(parcelas.indices.hist,file=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_indices_parcelas.RData"))
+  save(parcelas.indices.hist,file=paste0(path.data,"/",mod,"/r",run,"/historical/Indices_parcelas.RData"))
+
+  # save the yearly values of the indices for each of the parcelas:
+  index.hist.year<-array(NA,c(n.indices,n.years.hist,n.parcelas),dimnames=list(paste0("Indice",1:n.indices),first.year.hist:last.year.hist,paste0("Parcela",1:n.parcelas)))
+
+  for (p in 1:n.parcelas) {
+   for (year in 1:(n.years.hist)) {
+    begin.month = 1 + 12*(year-1)
+    my.months<-begin.month:n.months.hist
+
+    # create a 3D array with 2 dummy dimensions corresponding to lat and lon, to be able to use the indexX functions that need an array:
+    tt.hist<-array(temp.hist[my.months,p.pos.hist[[p]][1],p.pos.hist[[p]][2]],c(length(my.months),1,1))
+    pp.hist<-array(prec.hist[my.months,p.pos.hist[[p]][1],p.pos.hist[[p]][2]],c(length(my.months),1,1))
+
+    index.hist.year[1,year,p]<-index1(tt.hist,12) # starting from 1985, select only one year of data at a time
+    if (year < n.years.hist) index.hist.year[2,year,p]<-index2(tt.hist,24) # starting from 1985, select 2 years of data at a time
+    if (year < n.years.hist) index.hist.year[3,year,p]<-index3(tt.hist,24) # starting from 1985, select 2 years of data at a time
+    index.hist.year[4,year,p]<-index4(tt.hist,12) # starting from 1985, select only one year of data at a time
+    index.hist.year[5,year,p]<-index5(pp.hist,12) # starting from 1985, select only one year of data at a time
+    if (year < n.years.hist) index.hist.year[6,year,p]<-index6(pp.hist,24) # starting from 1985, select 2 years of data at a time
+   }
+  }
+
+  #plot(1985:2005,index.hist.year[1,,1],type="o")
+
+  #save(index.hist.year,file=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_indices_parcelas_anuales.RData"))
+  save(index.hist.year,file=paste0(path.data,"/",mod,"/r",run,"/historical/Indices_parcelas_anuales.RData"))
+
+  # same as above, but for the grid of parcelas:
+  grid.pos.hist<-list()
+  for (p in 1:n.grid.points) grid.pos.hist[[p]]=nearest(grid.lat[p],grid.lon[p],lat.hist,lon.hist)
+
+  # save the index values for each of the grid points:
+  grid.indices.hist<-array(NA,c(n.grid.points,n.indices))
+  for (p in 1:n.grid.points) { for (i in 1:n.indices) { grid.indices.hist[p,i]<-index.hist[[i]][grid.pos.hist[[p]][1],grid.pos.hist[[p]][2]] } }
+  save(grid.indices.hist,file=paste0(path.data,"/",mod,"/r",run,"/historical/Indices_grid_parcelas.RData"))
+
+
+  # interpolate each index onto the grid of the reanalysis:
+  index.hist.array<-array(NA,c(n.indices,n.lat.hist,n.lon.hist))
+  for(i in 1:n.indices) index.hist.array[i,,]<-index.hist[[i]]
+
+  #if(exists("index.hist.interp")==TRUE) rm(index.hist.interp)
+  cat("done\n");cat("Interpolating...")
+  index.hist.interp<-bilinear(lat.rean,lon.rean,lat.hist,lon.hist,index.hist.array) # interpolation with the bilinear method
+  cat("done\n");cat("Plotting data...")
+
+  # the bilinear function sometimes returns wrong values when it returns a list, so we modified it to return a matrix instead; here we convert it back to a list to plot it:
+  interp<-list(); for(l in 1:n.indices) interp[[l]]<-index.hist.interp[l,,]
+  rm(index.hist.interp); index.hist.interp=interp; rm(interp)
+
+  save(index.hist.interp,file=paste0(path.data,"/",mod,"/r",run,"/historical/Indices_interp.RData"))
+
+  # Save global interpolated maps of the indices:
+  for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+   png(filename=paste0(path.data,"/",mod,"/r",run,"/historical/Interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   my.toptitle<-paste0(index.name[my.index]," (Historical Model ",mod," Run ",run," interpolated)")
+   if(models.firstdim=="lat") {contour.data<-index.hist.interp[[my.index]]} else {contour.data<-t(index.hist.interp[[my.index]])}
+
+   PlotEquiMap2(index.hist.interp[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,contour.data,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+  }
+
+  # the same charts as above, but only for interpolated Patagonia and plotting the location of the four parcelas:
+  my.lat.pos<-which(lat.rean < pata.latmax & lat.rean > pata.latmin)
+  my.lon.pos<-which(lon.rean < pata.lonmax & lon.rean > pata.lonmin)
+  if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < pata.latmax+2 & lat.rean > pata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+  if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < pata.lonmax+2 & lon.rean > pata.lonmin-2)
+  my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+  my.lon<-lon.rean[my.lon.pos]
+
+  for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_interp_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+   png(filename=paste0(path.data,"/",mod,"/r",run,"/historical/Interp_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+   my.toptitle<-paste0(index.name[my.index]," (Historical Model ",mod," Run ",run," interpolated)")
+
+   # to solve some bugs of PlotEquiMap:
+   if(models.firstdim=="lat") {plot.data<-index.hist.interp[[my.index]][my.lon.pos,my.lat.pos] } else {plot.data<-index.hist.interp[[my.index]][my.lat.pos,my.lon.pos]}
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   #par(oma=c(0,4,0,0)) #par(mfrow=c(2,1)) #x11(width=4,height=.8)
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=1,col=c("purple","blue","red","black")) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+  }
+
+  # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+  my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+  my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+  if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < bigpata.latmax+2 & lat.rean > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+  if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < bigpata.lonmax+2 & lon.rean > bigpata.lonmin-2)
+  my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+  my.lon<-lon.rean[my.lon.pos]
+
+  for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   png(filename=paste0(path.data,"/",mod,"/r",run,"/historical/Interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   my.toptitle<-paste0(index.name.short[my.index]," (Historical Model ",mod," Run ",run," interpolated)")
+
+   # to solve some bugs of PlotEquiMap:
+   if(models.firstdim=="lat") {plot.data<-index.hist.interp[[my.index]][my.lon.pos,my.lat.pos] } else {plot.data<-index.hist.interp[[my.index]][my.lat.pos,my.lon.pos]}
+
+   layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+   #layout.show(2)
+   par(oma=c(1,0,0,0.5))
+   #par(mfrow=c(2,1))
+   #x11(width=4,height=.8)
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=4,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,subsampleg=my.subsamplegbar[[my.index]])
+   dev.off()
+  }
+
+
+#~~~Load the projections from CMIP5 2.6~~~
+
+#my.bar<-tkProgressBar(title = "Analyzing data...", label = "Please wait",min = 0, max = 100, initial = 0, width = 500)
+if(rcp26==TRUE){
+  print("done");cat("Processing scenario RCP 2.6...")
+
+  #setTkProgressBar(my.bar, run*100/n.runs[mod], label="Please wait")
+  my.dir.rcp26<-paste0(path.data,"/",mod,"/r",run,"/2.6")
+  my.files<-list.files(path=my.dir.rcp26,pattern="\\.nc$") # select all files inside the input data directory
+  if(length(my.files)==0) print(paste0("There are no files in the input directory ",my.dir.rcp26))
+  my.files.splitted<-strsplit(my.files,"_") # separate the suffix from the time range in the file name
+  first.year.rcp.file <- as.integer(substr(my.files.splitted[[1]][2],1,4)) # the first year of data in ALL the rcp 2.6 and 8.5 files for tas and prec (usually 2006)
+  shift.rcp<-12*(first.year.rcp-first.year.rcp.file) # number of months from the beginning of the data file (year 2006) to the beginning of the future period used in the study (2030 in our case)
+
+  # load the data for the first variable (i.e.: tas):
+  for(f in 1:length(my.files.splitted)) if(my.files.splitted[[f]][1]==vars[1]) my.file.var1<-my.files[f]
+  my.path1<-paste0(my.dir.rcp26,"/",my.file.var1) # the full path of the first file in the directory (there is only one)
+  my.ncdf1<-open.ncdf(my.path1) # open the netcdf file
+  var1<-get.var.ncdf(my.ncdf1,varid=vars[1]) # var1 format: [lon,lat,month]
+
+  # load the data for the second variable (i.e.: pr):
+  for(f in 1:length(my.files.splitted)) if(my.files.splitted[[f]][1]==vars[2]) my.file.var2<-my.files[f]
+  my.path2<-paste0(my.dir.rcp26,"/",my.file.var2) # the full path of the second file in the directory (there is only one)
+  my.ncdf2<-open.ncdf(my.path2) # open the netcdf file
+  var2<-get.var.ncdf(my.ncdf2,varid=vars[2]) # var2 format: [lon,lat,month]
+
+  lat.rcp26<-get.var.ncdf(my.ncdf1,varid="latitude") # <- get the latitude (it is the same both for temp and prec data and for all runs of the same model)
+  lon.rcp26<-get.var.ncdf(my.ncdf1,varid="longitude") # <- get the longitude
+  n.lat.rcp26<-length(lat.rcp26)
+  n.lon.rcp26<-length(lon.rcp26)
+
+  close(my.ncdf1)
+  close(my.ncdf2)
+
+  # array where to put all the data of the run during the study period (format: [lon,lat,month]):
+  temp.rcp26<-prec.rcp26<-array(NA,c(n.lon.rcp26,n.lat.rcp26,n.months.rcp))
+
+  temp.rcp26[,,]<-var1[,,(1+shift.rcp):(n.months.rcp+shift.rcp)]
+  prec.rcp26[,,]<-var2[,,(1+shift.rcp):(n.months.rcp+shift.rcp)]
+
+  # array where to put all the data of the run for the transition period, from the end of the historical data (2006) up to the beginning of the future period (2030) (format: [lon,lat,month]):
+  temp.trans26<-prec.trans26<-array(NA,c(n.lon.rcp26,n.lat.rcp26,shift.rcp))
+  temp.trans26[,,]<-var1[,,1:shift.rcp]
+  prec.trans26[,,]<-var2[,,1:shift.rcp]
+
+  rm(var1,var2)
+
+  # check if the first dimension is the longitude or the latitude:
+  if(dim(temp.rcp26)[1]>dim(temp.rcp26)[2]) models.firstdim="lon"
+  if(dim(temp.rcp26)[1]<dim(temp.rcp26)[2]) models.firstdim="lat"
+
+  # calculate the climate indices for this run:
+  index.rcp26<-indices(temp.rcp26,prec.rcp26,n.months.rcp)
+
+  # charts for Patagonia, plotting the location of the four parcelas:
+  my.lat.pos<-which(lat.rcp26 < pata.latmax & lat.rcp26 > pata.latmin)
+  my.lon.pos<-which(lon.rcp26 < pata.lonmax & lon.rcp26 > pata.lonmin)
+  if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rcp26 < pata.latmax+2 & lat.rcp26 > pata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+  if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rcp26 < pata.lonmax+2 & lon.rcp26 > pata.lonmin-2)
+  my.lat<-lat.rcp26[my.lat.pos] # select only Patagonia pixels
+  my.lon<-lon.rcp26[my.lon.pos]
+
+  for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+   png(filename=paste0(path.data,"/",mod,"/r",run,"/2.6/Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+
+   my.toptitle<-paste0(index.name[my.index]," (RCP 2.6 Model ",mod," Run ",run,")")
+
+   # to solve some bugs of PlotEquiMap:
+   if(models.firstdim=="lat") {plot.data<-index.rcp26[[my.index]][my.lon.pos,my.lat.pos] } else {plot.data<-index.rcp26[[my.index]][my.lat.pos,my.lon.pos]}
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   #par(oma=c(0,4,0,0)) #par(mfrow=c(2,1)) #x11(width=4,height=.8)
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=1,col=c("purple","blue","red","black")) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+  }
+
+  # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+  my.lat.pos<-which(lat.rcp26 < bigpata.latmax & lat.rcp26 > bigpata.latmin)
+  my.lon.pos<-which(lon.rcp26 < bigpata.lonmax & lon.rcp26 > bigpata.lonmin)
+  if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rcp26 < bigpata.latmax+2 & lat.rcp26 > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+  if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rcp26 < bigpata.lonmax+2 & lon.rcp26 > bigpata.lonmin-2)
+  my.lat<-lat.rcp26[my.lat.pos] # select only Patagonia pixels
+  my.lon<-lon.rcp26[my.lon.pos]
+
+  for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   png(filename=paste0(path.data,"/",mod,"/r",run,"/2.6/BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+   my.toptitle<-paste0(index.name.short[my.index]," (RCP 2.6 Model ",mod," Run ",run,")")
+
+   # to solve some bugs of PlotEquiMap:
+   if(models.firstdim=="lat") {plot.data<-index.rcp26[[my.index]][my.lon.pos,my.lat.pos] } else {plot.data<-index.rcp26[[my.index]][my.lat.pos,my.lon.pos]}
+   if(models.firstdim=="lat") {contour.data<-index.rcp26[[my.index]][my.lon.pos,my.lat.pos]} else {contour.data<-t(index.rcp26[[my.index]][my.lat.pos,my.lon.pos])}
+
+   layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+   #layout.show(2)
+   par(oma=c(1,0,0,0.5))
+   #par(mfrow=c(2,1))
+   #x11(width=4,height=.8)
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,contour.data,contour.data,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F)
+     #contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,subsampleg=my.subsamplegbar[[my.index]])
+   dev.off()
+  }
+
+  # assign each parcela to a grid point of the rcp26 model:
+  p.pos.rcp26<-list()
+  for (p in 1:n.parcelas) p.pos.rcp26[[p]]=nearest(parcelas.lat[p],parcelas.lon[p],lat.rcp26,lon.rcp26)
+
+  # save the index values for each of the parcelas and the number of different grid points:
+  parcelas.indices.rcp26<-array(NA,c(n.parcelas,n.indices))
+  for (p in 1:n.parcelas) { for (i in 1:n.indices) { parcelas.indices.rcp26[p,i]<-index.rcp26[[i]][p.pos.rcp26[[p]][1],p.pos.rcp26[[p]][2]] } }
+  #save(parcelas.indices.rcp26,file=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_indices_parcelas.RData"))
+  save(parcelas.indices.rcp26,file=paste0(path.data,"/",mod,"/r",run,"/2.6/Indices_parcelas.RData"))
+
+  # save the yearly values of the indices for each of the parcelas for the period 2030-2050:
+  index.rcp26.year<-array(NA,c(n.indices,n.years.rcp,n.parcelas),dimnames=list(paste0("Indice",1:n.indices),first.year.rcp:last.year.rcp,paste0("Parcela",1:n.parcelas)))
+
+  for (p in 1:n.parcelas) {
+   for (year in 1:(n.years.rcp)) {
+    begin.month = 1 + 12*(year-1)
+    my.months<-begin.month:n.months.rcp
+
+    # create a 3D array with 2 dummy dimensions corresponding to lat and lon, to be able to use the indexX functions that need an array:
+    tt.rcp26<-array(temp.rcp26[my.months,p.pos.rcp26[[p]][1],p.pos.rcp26[[p]][2]],c(length(my.months),1,1))
+    pp.rcp26<-array(prec.rcp26[my.months,p.pos.rcp26[[p]][1],p.pos.rcp26[[p]][2]],c(length(my.months),1,1))
+
+    index.rcp26.year[1,year,p]<-index1(tt.rcp26,12) # starting from 2030, select only one year of data at a time
+    if (year < n.years.rcp) index.rcp26.year[2,year,p]<-index2(tt.rcp26,24) # starting from 2030, select 2 years of data at a time
+    if (year < n.years.rcp) index.rcp26.year[3,year,p]<-index3(tt.rcp26,24) # starting from 2030, select 2 years of data at a time
+    index.rcp26.year[4,year,p]<-index4(tt.rcp26,12) # starting from 2030, select only one year of data at a time
+    index.rcp26.year[5,year,p]<-index5(pp.rcp26,12) # starting from 2030, select only one year of data at a time
+    if (year < n.years.rcp) index.rcp26.year[6,year,p]<-index6(pp.rcp26,24) # starting from 2030, select 2 years of data at a time
+   }
+  }
+
+  # save the yearly values of the indices for each of the parcelas for the transition period 2006-2029:
+  index.trans26.year<-array(NA,c(n.indices,shift.rcp/12,n.parcelas),dimnames=list(paste0("Indice",1:n.indices),(last.year.hist+1):(first.year.rcp-1),paste0("Parcela",1:n.parcelas)))
+
+  for (p in 1:n.parcelas) {
+   for (year in 1:(shift.rcp/12)) {
+    begin.month = 1 + 12*(year-1)
+    my.months<-begin.month:shift.rcp
+
+    # create a 3D array with 2 dummy dimensions corresponding to lat and lon, to be able to use the indexX functions that need an array:
+    tt.trans26<-array(temp.trans26[my.months,p.pos.rcp26[[p]][1],p.pos.rcp26[[p]][2]],c(length(my.months),1,1))
+    pp.trans26<-array(prec.trans26[my.months,p.pos.rcp26[[p]][1],p.pos.rcp26[[p]][2]],c(length(my.months),1,1))
+
+    index.trans26.year[1,year,p]<-index1(tt.trans26,12) # starting from 2006, select only one year of data at a time
+    if (year < shift.rcp/12) index.trans26.year[2,year,p]<-index2(tt.trans26,24) # starting from 2006, select 2 years of data at a time
+       if (year < shift.rcp/12) index.trans26.year[2,year,p]<-index2(tt.trans26,24) # starting from 2006, select 2 years of data at a time
+       if (year < shift.rcp/12) index.trans26.year[3,year,p]<-index3(tt.trans26,24)
+       index.trans26.year[4,year,p]<-index4(tt.trans26,12)
+       index.trans26.year[5,year,p]<-index5(pp.trans26,12)
+       if (year < shift.rcp/12) index.trans26.year[6,year,p]<-index6(pp.trans26,24)
+    }
+ }
+
+ #plot(2030:2050,index.rcp26.year[1,,1],type="o")
+
+ #save(index.rcp26.year,file=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_indices_parcelas_anuales.RData"))
+ save(index.rcp26.year,file=paste0(path.data,"/",mod,"/r",run,"/2.6/Indices_parcelas_anuales.RData"))
+
+ #plot(2006:2029,index.trans26.year[1,,1],type="o")
+
+ #save(index.trans26.year,file=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_indices_parcelas_anuales_transition.RData"))
+ save(index.trans26.year,file=paste0(path.data,"/",mod,"/r",run,"/2.6/Indices_parcelas_anuales_transition.RData"))
+
+ # same as above, but for the grid of parcelas:
+ grid.pos.rcp26<-list()
+ for (p in 1:n.grid.points) grid.pos.rcp26[[p]]=nearest(grid.lat[p],grid.lon[p],lat.rcp26,lon.rcp26)
+
+ # save the index values for each of the grid points:
+ grid.indices.rcp26<-array(NA,c(n.grid.points,n.indices))
+ for (p in 1:n.grid.points) { for (i in 1:n.indices) { grid.indices.rcp26[p,i]<-index.rcp26[[i]][grid.pos.rcp26[[p]][1],grid.pos.rcp26[[p]][2]] } }
+ #save(grid.indices.rean,file=paste0(outputdir,"/indices_reanalisis/indices_grid_parcelas.RData"))
+ save(grid.indices.rcp26,file=paste0(path.data,"/",mod,"/r",run,"/2.6/Indices_grid_parcelas.RData"))
+
+ # interpolate each index on the same grid as the reanalysis:
+ index.rcp26.array<-array(NA,c(n.indices,n.lat.rcp26,n.lon.rcp26))
+ for(i in 1:n.indices) index.rcp26.array[i,,]<-index.rcp26[[i]]
+
+ #rm(index.rcp26.interp) # if there is already an object with this name
+ print("done");cat("Interpolating...")
+ index.rcp26.interp<-bilinear(lat.rean,lon.rean,lat.rcp26,lon.rcp26,index.rcp26.array) # interpolation with the bilinear method
+ print("done");cat("Plotting data...")
+
+ # bilinear() sometimes returned wrong values when returning a list, so it was modified to return an array instead; convert it back to a list to plot it:
+ interp<-list(); for(l in 1:n.indices) interp[[l]]<-index.rcp26.interp[l,,]
+ rm(index.rcp26.interp); index.rcp26.interp=interp; rm(interp)
+
+ save(index.rcp26.interp,file=paste0(path.data,"/",mod,"/r",run,"/2.6/Indices_interp.RData"))
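+ # the conversion above could also be done in one line, applied directly to the array
+ # returned by bilinear() (sketch, same result):
+ #index.rcp26.interp<-lapply(seq_len(n.indices),function(l) index.rcp26.interp[l,,])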
+ # Save global interpolated maps of indices:
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/2.6/Interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+    layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+    #layout.show(2)
+    my.toptitle<-paste0(index.name[my.index]," (RCP 2.6 Model ",mod," Run ",run," interpolated)")
+    if(models.firstdim=="lat") {contour.data<-index.rcp26.interp[[my.index]]} else {contour.data<-t(index.rcp26.interp[[my.index]])}
+    PlotEquiMap2(index.rcp26.interp[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+                 contours=list(NULL,NULL,contour.data,contour.data,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+    dev.off()
+ }
+
+ # the same charts as above, but only for Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean<pata.latmax & lat.rean>pata.latmin)
+ my.lon.pos<-which(lon.rean<pata.lonmax & lon.rean>pata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean<pata.latmax+2 & lat.rean>pata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean<pata.lonmax+2 & lon.rean>pata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_interp_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/2.6/Interp_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+
+    my.toptitle<-paste0(index.name[my.index]," (RCP 2.6 Model ",mod," Run ",run," interpolated)")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {plot.data<-index.rcp26.interp[[my.index]][my.lon.pos,my.lat.pos]} else {plot.data<-index.rcp26.interp[[my.index]][my.lat.pos,my.lon.pos]}
+
+    layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+    #layout.show(2)
+    #par(oma=c(0,4,0,0)) #par(mfrow=c(2,1)) #x11(width=4,height=.8)
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=1,col=c("purple","blue","red","black")) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+    dev.off()
+ }
+
+ # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < bigpata.latmax+2 & lat.rean > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < bigpata.lonmax+2 & lon.rean > bigpata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/2.6/Interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+    my.toptitle<-paste0(index.name.short[my.index]," (RCP 2.6 Model ",mod," Run ",run," interpolated)")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {plot.data<-index.rcp26.interp[[my.index]][my.lon.pos,my.lat.pos]} else {plot.data<-index.rcp26.interp[[my.index]][my.lat.pos,my.lon.pos]}
+    if(models.firstdim=="lat") {contour.data<-index.rcp26.interp[[my.index]][my.lon.pos,my.lat.pos]} else {contour.data<-t(index.rcp26.interp[[my.index]][my.lat.pos,my.lon.pos])}
+
+    layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+    #layout.show(2)
+    par(oma=c(1,0,0,0.5))
+    #par(mfrow=c(2,1))
+    #x11(width=4,height=.8)
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+                 contours=list(NULL,NULL,contour.data,contour.data,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F)
+                 #contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,my.subsamplegbar[[my.index]])
+    dev.off()
+ }
+
+ # the same charts as above, but plotting the difference between the rcp 2.6 and the historical data:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < bigpata.latmax+2 & lat.rean > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < bigpata.lonmax+2 & lon.rean > bigpata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp26/",models.rcp26[mod],"_run_",run,"_interp_tendency_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/2.6/interp_tendency_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+    my.toptitle<-paste0(index.name.short[my.index]," (RCP 2.6 - Historical Model ",mod," Run ",run," interpolated)")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {
+       plot.data<-index.rcp26.interp[[my.index]][my.lon.pos,my.lat.pos]-index.hist.interp[[my.index]][my.lon.pos,my.lat.pos]
+    } else {
+       plot.data<-index.rcp26.interp[[my.index]][my.lat.pos,my.lon.pos]-index.hist.interp[[my.index]][my.lat.pos,my.lon.pos]
+    }
+
+    layout(matrix(c(1,1,1,1,2), 1, 5, byrow = TRUE))
+    par(oma=c(1,0,0,0.2))
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),vert=TRUE,cex=1.3)
+
+    dev.off()
+ }
+
+} # close if on rcp26
+#close(my.bar)
+
+
+#~~~ Load the projections from CMIP5 RCP 8.5 ~~~
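+# the block below reads the NetCDF files with the old 'ncdf' package (open.ncdf/get.var.ncdf/close);
+# a minimal equivalent with the newer 'ncdf4' package (sketch, assuming the same file layout and
+# variable names) would be:
+#library(ncdf4)
+#my.ncdf1<-nc_open(my.path1)
+#var1<-ncvar_get(my.ncdf1,varid=vars[1])          # [lon,lat,month]
+#lat.rcp85<-ncvar_get(my.ncdf1,varid="latitude")
+#nc_close(my.ncdf1)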
wait",min = 0, max = 100, initial = 0, width = 500) +if(rcp85==TRUE){ + print("done");cat("Processing scenary RCP 8.5...") + + #setTkProgressBar(my.bar, run*100/n.runs[mod], label="Please wait") + my.dir.rcp85<-paste0(path.data,"/",mod,"/r",run,"/8.5") + my.files<-list.files(path=my.dir.rcp85,pattern="\\.nc$") # select all files inside the input data directory + if(length(my.files)==0) print(paste0("There are no files in the input directory ",my.dir.rcp85)) + my.files.splitted<-strsplit(my.files,"_") # separate the suffix from the time range in the + first.year.rcp.file <- as.integer(substr(my.files.splitted[[1]][2],1,4)) # The first year of data in ALL the rcp 2.6 and 8.5 files for tas and prec (usually 2006) + shift.rcp<-12*(first.year.rcp-first.year.rcp.file) # number of months from the beginning of the data file (year 2006) to the beginning of the future period used in the study (2030 in our case) + + # load the data for the first variable (i.e: tas): + for(f in 1:length(my.files.splitted)) if(my.files.splitted[[f]][1]==vars[1]) my.file.var1<-my.files[f] + my.path1<-paste0(my.dir.rcp85,"/",my.file.var1) # the full path of the first file in the directory (there is only one) + my.ncdf1<-open.ncdf(my.path1) # open the netcdf file + var1<-get.var.ncdf(my.ncdf1,varid=vars[1]) # var1 format: [lon,lat,month] + + # load the data for the second variable (i.e: pr): + for(f in 1:length(my.files.splitted)) if(my.files.splitted[[f]][1]==vars[2]) my.file.var2<-my.files[f] + my.path2<-paste0(my.dir.rcp85,"/",my.file.var2) # the full path of the first file in the directory (there is only one) + my.ncdf2<-open.ncdf(my.path2) # open the netcdf file + var2<-get.var.ncdf(my.ncdf2,varid=vars[2]) # var1 format: [lon,lat,month] + + lat.rcp85<-get.var.ncdf(my.ncdf1,varid="latitude") # <- get the latitude (it is the same both for temp and prec data and for all runs of the same model) + lon.rcp85<-get.var.ncdf(my.ncdf1,varid="longitude") # <- get the longitude + n.lat.rcp85<-length(lat.rcp85) + n.lon.rcp85<-length(lon.rcp85) + + close(my.ncdf1) + close(my.ncdf2) + + #array where to put all the data of the runs during the study period (format: [lon,lat,month]) + temp.rcp85<-prec.rcp85<-array(NA,c(n.lon.rcp85,n.lat.rcp85,n.months.rcp)) + + temp.rcp85[,,]<-var1[,,(1+shift.rcp):(n.months.rcp+shift.rcp)] + prec.rcp85[,,]<-var2[,,(1+shift.rcp):(n.months.rcp+shift.rcp)] + + #array where to put all the data of the runs for the transition period starting from last day of historica data (2006) up to the beginning of the future period (2030) (format: [lon,lat,month]) + temp.trans85<-prec.trans85<-array(NA,c(n.lon.rcp85,n.lat.rcp85,shift.rcp)) + temp.trans85[,,]<-var1[,,1:shift.rcp] + prec.trans85[,,]<-var2[,,1:shift.rcp] + + rm(var1,var2) + + # check if the first dimension is the longitude or the latitude: + if(dim(temp.rcp85)[1]>dim(temp.rcp85)[2]) models.firstdim="lon" + if(dim(temp.rcp85)[1]pata.latmin) + my.lon.pos<-which(lon.rcp85pata.lonmin) + if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rcp85pata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon + if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rcp85pata.lonmin-2) + my.lat<-lat.rcp85[my.lat.pos] # select only Patagonia pixels + my.lon<-lon.rcp85[my.lon.pos] + + for(my.index in 1:n.indices){ + #png(filename=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480) + 
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/8.5/Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+
+    my.toptitle<-paste0(index.name[my.index]," (RCP 8.5 Model ",mod," Run ",run,")")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {plot.data<-index.rcp85[[my.index]][my.lon.pos,my.lat.pos]} else {plot.data<-index.rcp85[[my.index]][my.lat.pos,my.lon.pos]}
+
+    layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+    #layout.show(2)
+    #par(oma=c(0,4,0,0)) #par(mfrow=c(2,1)) #x11(width=4,height=.8)
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=1,col=c("purple","blue","red","black")) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+    dev.off()
+ }
+
+ # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rcp85 < bigpata.latmax & lat.rcp85 > bigpata.latmin)
+ my.lon.pos<-which(lon.rcp85 < bigpata.lonmax & lon.rcp85 > bigpata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rcp85 < bigpata.latmax+2 & lat.rcp85 > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rcp85 < bigpata.lonmax+2 & lon.rcp85 > bigpata.lonmin-2)
+ my.lat<-lat.rcp85[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rcp85[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/8.5/BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+    my.toptitle<-paste0(index.name.short[my.index]," (RCP 8.5 Model ",mod," Run ",run,")")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {plot.data<-index.rcp85[[my.index]][my.lon.pos,my.lat.pos]} else {plot.data<-index.rcp85[[my.index]][my.lat.pos,my.lon.pos]}
+    if(models.firstdim=="lat") {contour.data<-index.rcp85[[my.index]][my.lon.pos,my.lat.pos]} else {contour.data<-t(index.rcp85[[my.index]][my.lat.pos,my.lon.pos])}
+
+    layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+    #layout.show(2)
+    par(oma=c(1,0,0,0.5))
+    #par(mfrow=c(2,1))
+    #x11(width=4,height=.8)
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+                 contours=list(NULL,NULL,contour.data,contour.data,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F)
+                 #contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,my.subsamplegbar[[my.index]])
+    dev.off()
+ }
+
+ # assign each parcela to a grid point of the rcp85 model:
+ p.pos.rcp85<-list()
+ for (p in 1:n.parcelas) p.pos.rcp85[[p]]=nearest(parcelas.lat[p],parcelas.lon[p],lat.rcp85,lon.rcp85)
+
+ # save the index values for each of the parcelas and the number of different grid points:
+ parcelas.indices.rcp85<-array(NA,c(n.parcelas,n.indices))
+ for (p in 1:n.parcelas) { for (i in 1:n.indices) { parcelas.indices.rcp85[p,i]<-index.rcp85[[i]][p.pos.rcp85[[p]][1],p.pos.rcp85[[p]][2]] } }
+ #save(parcelas.indices.rcp85,file=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_indices_parcelas.RData"))
+ save(parcelas.indices.rcp85,file=paste0(path.data,"/",mod,"/r",run,"/8.5/Indices_parcelas.RData"))
+
+ # save the yearly values of the indices for each of the parcelas for the period 2030-2050:
+ index.rcp85.year<-array(NA,c(n.indices,n.years.rcp,n.parcelas),dimnames=list(paste0("Indice",1:n.indices),first.year.rcp:last.year.rcp,paste0("Parcela",1:n.parcelas)))
+
+ for (p in 1:n.parcelas) {
+    for (year in 1:(n.years.rcp)) {
+       begin.month = 1 + 12*(year-1)
+       my.months<-begin.month:n.months.rcp
+
+       # create a 3D array with two dummy dimensions for lat and lon, to be able to use the indexX functions that need an array:
+       tt.rcp85<-array(temp.rcp85[my.months,p.pos.rcp85[[p]][1],p.pos.rcp85[[p]][2]],c(length(my.months),1,1))
+       pp.rcp85<-array(prec.rcp85[my.months,p.pos.rcp85[[p]][1],p.pos.rcp85[[p]][2]],c(length(my.months),1,1))
+
+       index.rcp85.year[1,year,p]<-index1(tt.rcp85,12) # starting from 2030, select only one year of data at a time
+       if (year < n.years.rcp) index.rcp85.year[2,year,p]<-index2(tt.rcp85,24) # starting from 2030, select 2 years of data at a time
+       if (year < n.years.rcp) index.rcp85.year[3,year,p]<-index3(tt.rcp85,24) # starting from 2030, select 2 years of data at a time
+       index.rcp85.year[4,year,p]<-index4(tt.rcp85,12) # starting from 2030, select only one year of data at a time
+       index.rcp85.year[5,year,p]<-index5(pp.rcp85,12) # starting from 2030, select only one year of data at a time
+       if (year < n.years.rcp) index.rcp85.year[6,year,p]<-index6(pp.rcp85,24) # starting from 2030, select 2 years of data at a time
+    }
+ }
+
+ # save the yearly values of the indices for each of the parcelas for the transition period 2006-2029:
+ index.trans85.year<-array(NA,c(n.indices,shift.rcp/12,n.parcelas),dimnames=list(paste0("Indice",1:n.indices),(last.year.hist+1):(first.year.rcp-1),paste0("Parcela",1:n.parcelas)))
+
+ for (p in 1:n.parcelas) {
+    for (year in 1:(shift.rcp/12)) {
+       begin.month = 1 + 12*(year-1)
+       my.months<-begin.month:shift.rcp
+
+       # create a 3D array with two dummy dimensions for lat and lon, to be able to use the indexX functions that need an array:
+       tt.trans85<-array(temp.trans85[my.months,p.pos.rcp85[[p]][1],p.pos.rcp85[[p]][2]],c(length(my.months),1,1))
+       pp.trans85<-array(prec.trans85[my.months,p.pos.rcp85[[p]][1],p.pos.rcp85[[p]][2]],c(length(my.months),1,1))
+
+       index.trans85.year[1,year,p]<-index1(tt.trans85,12) # starting from 2006, select only one year of data at a time
+       if (year < shift.rcp/12) index.trans85.year[2,year,p]<-index2(tt.trans85,24) # starting from 2006, select 2 years of data at a time
+       if (year < shift.rcp/12) index.trans85.year[3,year,p]<-index3(tt.trans85,24)
+       index.trans85.year[4,year,p]<-index4(tt.trans85,12)
+       index.trans85.year[5,year,p]<-index5(pp.trans85,12)
+       if (year < shift.rcp/12) index.trans85.year[6,year,p]<-index6(pp.trans85,24)
+    }
+ }
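+ # a quick check of the month bookkeeping above, with the usual values quoted in the comments
+ # (data files starting in 2006, future period starting in 2030):
+ # shift.rcp = 12*(2030-2006) = 288 months, so shift.rcp/12 = 24 transition years (2006-2029),
+ # and the future period starts at month 1+shift.rcp = 289 of the file, i.e. January 2030.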
+
+ #plot(2030:2050,index.rcp85.year[1,,1],type="o")
+
+ #save(index.rcp85.year,file=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_indices_parcelas_anuales.RData"))
+ save(index.rcp85.year,file=paste0(path.data,"/",mod,"/r",run,"/8.5/Indices_parcelas_anuales.RData"))
+
+ #plot(2006:2029,index.trans85.year[1,,1],type="o")
+
+ #save(index.trans85.year,file=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_indices_parcelas_anuales_transition.RData"))
+ save(index.trans85.year,file=paste0(path.data,"/",mod,"/r",run,"/8.5/Indices_parcelas_anuales_transition.RData"))
+
+ # same as above, but for the grid of parcelas:
+ grid.pos.rcp85<-list()
+ for (p in 1:n.grid.points) grid.pos.rcp85[[p]]=nearest(grid.lat[p],grid.lon[p],lat.rcp85,lon.rcp85)
+
+ # save the index values for each of the grid points:
+ grid.indices.rcp85<-array(NA,c(n.grid.points,n.indices))
+ for (p in 1:n.grid.points) { for (i in 1:n.indices) { grid.indices.rcp85[p,i]<-index.rcp85[[i]][grid.pos.rcp85[[p]][1],grid.pos.rcp85[[p]][2]] } }
+ save(grid.indices.rcp85,file=paste0(path.data,"/",mod,"/r",run,"/8.5/Indices_grid_parcelas.RData"))
+
+ # interpolate each index on the same grid as the reanalysis:
+ index.rcp85.array<-array(NA,c(n.indices,n.lat.rcp85,n.lon.rcp85))
+ for(i in 1:n.indices) index.rcp85.array[i,,]<-index.rcp85[[i]]
+
+ #rm(index.rcp85.interp) # if there is already an object with this name
+ print("done");cat("Interpolating...")
+ index.rcp85.interp<-bilinear(lat.rean,lon.rean,lat.rcp85,lon.rcp85,index.rcp85.array) # interpolation with the bilinear method
+ print("done");print("Plotting data...")
+
+ # bilinear() sometimes returned wrong values when returning a list, so it was modified to return an array instead; convert it back to a list to plot it:
+ interp<-list(); for(l in 1:n.indices) interp[[l]]<-index.rcp85.interp[l,,]
+ rm(index.rcp85.interp); index.rcp85.interp=interp; rm(interp)
+
+ save(index.rcp85.interp,file=paste0(path.data,"/",mod,"/r",run,"/8.5/Indices_interp.RData"))
+
+ # Save global interpolated maps of indices:
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/8.5/Interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+    layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+    #layout.show(2)
+    my.toptitle<-paste0(index.name[my.index]," (RCP 8.5 Model ",mod," Run ",run," interpolated)")
+    if(models.firstdim=="lat") {contour.data<-index.rcp85.interp[[my.index]]} else {contour.data<-t(index.rcp85.interp[[my.index]])}
+    PlotEquiMap2(index.rcp85.interp[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+                 contours=list(NULL,NULL,contour.data,contour.data,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+    dev.off()
+ }
+
+ # the same charts as above, but only for Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean<pata.latmax & lat.rean>pata.latmin)
+ my.lon.pos<-which(lon.rean<pata.lonmax & lon.rean>pata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean<pata.latmax+2 & lat.rean>pata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean<pata.lonmax+2 & lon.rean>pata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_interp_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/8.5/Interp_Patagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=600,height=480)
+
+    my.toptitle<-paste0(index.name[my.index]," (RCP 8.5 Model ",mod," Run ",run," interpolated)")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {plot.data<-index.rcp85.interp[[my.index]][my.lon.pos,my.lat.pos]} else {plot.data<-index.rcp85.interp[[my.index]][my.lat.pos,my.lon.pos]}
+
+    layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+    #layout.show(2)
+    #par(oma=c(0,4,0,0)) #par(mfrow=c(2,1)) #x11(width=4,height=.8)
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=1,col=c("purple","blue","red","black")) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+    dev.off()
+ }
+
+ # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < bigpata.latmax+2 & lat.rean > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < bigpata.lonmax+2 & lon.rean > bigpata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/8.5/Interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+    my.toptitle<-paste0(index.name.short[my.index]," (RCP 8.5 Model ",mod," Run ",run," interpolated)")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {plot.data<-index.rcp85.interp[[my.index]][my.lon.pos,my.lat.pos]} else {plot.data<-index.rcp85.interp[[my.index]][my.lat.pos,my.lon.pos]}
+    if(models.firstdim=="lat") {contour.data<-index.rcp85.interp[[my.index]][my.lon.pos,my.lat.pos]} else {contour.data<-t(index.rcp85.interp[[my.index]][my.lat.pos,my.lon.pos])}
+
+    layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+    #layout.show(2)
+    par(oma=c(1,0,0,0.5))
+    #par(mfrow=c(2,1))
+    #x11(width=4,height=.8)
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+                 contours=list(NULL,NULL,contour.data,contour.data,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F)
+                 #contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,my.subsamplegbar[[my.index]])
+    dev.off()
+ }
+
+ # the same charts as above, but plotting the difference between the rcp 8.5 and the historical data:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < bigpata.latmax+2 & lat.rean > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < bigpata.lonmax+2 & lon.rean > bigpata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+    #png(filename=paste0(outputdir,"/indices_rcp85/",models.rcp85[mod],"_run_",run,"_interp_tendency_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+    png(filename=paste0(path.data,"/",mod,"/r",run,"/8.5/interp_tendency_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+    my.toptitle<-paste0(index.name.short[my.index]," (RCP 8.5 - Historical Model ",mod," Run ",run," interpolated)")
+
+    # to work around some bugs of PlotEquiMap:
+    if(models.firstdim=="lat") {
+       plot.data<-index.rcp85.interp[[my.index]][my.lon.pos,my.lat.pos]-index.hist.interp[[my.index]][my.lon.pos,my.lat.pos]
+    } else {
+       plot.data<-index.rcp85.interp[[my.index]][my.lat.pos,my.lon.pos]-index.hist.interp[[my.index]][my.lat.pos,my.lon.pos]
+    }
+
+    layout(matrix(c(1,1,1,1,2), 1, 5, byrow = TRUE))
+    par(oma=c(1,0,0,0.2))
+    PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+                 brks=my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),
+                 filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+    my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+    map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+    ColorBar(my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),vert=TRUE,cex=1.3)
+
+    dev.off()
+ }
+
+} # close if on rcp85
+#close(my.bar)
+
+#print("Saving R session...")
+#save.image(file=paste0(path.data,"/",mod,"/r",run,"/run.RData"))
+
+print("Finished")
+
+#parcelas.cambio26<-parcelas.indices.rcp26-parcelas.indices.hist
+#parcelas.cambio85<-parcelas.indices.rcp85-parcelas.indices.hist
+
+  } # close the loop on the run to analyze
+ } # close the loop on the run to analyze
+} # close the loop on the models to analyze
+
+
+############################################## Ensemble creation ###################################################
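+# this section mentions the Taylor diagram (RMSE, SD ratio, R) computed over a zoom area;
+# for reference, a minimal sketch of those three statistics for a model field against an
+# observed field on the same grid (hypothetical helper, not used below):
+#taylor.stats<-function(mod.field,obs.field){
+#   m<-as.vector(mod.field); o<-as.vector(obs.field)
+#   c(rmse=sqrt(mean((m-o)^2)), sd.ratio=sd(m)/sd(o), r=cor(m,o))
+#}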
+
+if(ensemble==TRUE){
+# you need to have already analyzed all runs individually and saved the data of each one before computing the ensemble
+
+# select the Patagonia zoom (in this case over the area of the 4 parcelas) to calculate the weighted ensemble and the Taylor diagram (RMSE, SD ratio, R) only for a selected area:
+zoom.lat.min=12 # the latitudinal position of the first pixel (lower lat. value) of the reanalysis dataset inside the BigPatagonia area
+zoom.lat.max=20 # the latitudinal position of the last pixel (higher lat. value) of the reanalysis dataset inside the BigPatagonia area
+zoom.lon.min=4  # ...
+zoom.lon.max=9  # ...
+
+col.taylor=c(brewer.pal(12, "Paired"),"gray60","magenta","black") # model colors for the taylor diagram
+col.taylor[11]<-"sienna4" # replace yellow with brown
+
+# suffix to add to the output file name of all the taylor diagrams (+legend file) (e.g.: selected_runs, alternative_weights):
+suffix=""
+
+#########################################################
+
+# variables ensemble:
+run.rcp26<-run.rcp85<-list() # list with the data of each run and model for RCP 2.6 or RCP 8.5
+run.hist26<-run.hist85<-list() # list with the data of each run and model for the historical associated to RCP 2.6 or to RCP 8.5
+
+# calculate the number of runs for each model:
+#ntot.runs.hist<-colSums(n.runs.hist != 0)
+ntot.runs.rcp26<-colSums(n.runs.rcp26 != 0)
+ntot.runs.rcp85<-colSums(n.runs.rcp85 != 0)
+
+# calculate the number of models really used in each scenario:
+#ntot.models.hist<-length(which(ntot.runs.hist != 0))
+ntot.models.rcp26<-length(which(ntot.runs.rcp26 != 0))
+ntot.models.rcp85<-length(which(ntot.runs.rcp85 != 0))
+
+# define some void lists of 0s that will be attributed to not-existing runs:
+void.index<-matrix(0,n.lat.rean,n.lon.rean)
+void.run<-list(void.index,void.index,void.index,void.index,void.index,void.index)
+
+# load all runs in a list of run data:
+for(mod in 1:n.max.models){
+  run.rcp26[[mod]]<-list()
+  run.hist26[[mod]]<-list()
+  run.rcp85[[mod]]<-list()
+  run.hist85[[mod]]<-list()
+
+  # load all the rcp 2.6 data and its historical counterpart for the indices of the runs of the same model:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp26[run,mod]!=0){
+      run.rcp26[[mod]][[run]]<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/2.6/Indices_interp.RData")))
+      run.hist26[[mod]][[run]]<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/historical/Indices_interp.RData")))
+    } else {
+      run.rcp26[[mod]][[run]]<-void.run
+      run.hist26[[mod]][[run]]<-void.run
+    }
+  }
+
+  # load all the rcp 8.5 data and its historical counterpart for the indices of the runs of the same model:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp85[run,mod]!=0){
+      run.rcp85[[mod]][[run]]<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/8.5/Indices_interp.RData")))
+      run.hist85[[mod]][[run]]<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/historical/Indices_interp.RData")))
+    } else {
+      run.rcp85[[mod]][[run]]<-void.run
+      run.hist85[[mod]][[run]]<-void.run
+    }
+  }
+}
+
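+# note: the get(load(file)) idiom above and below works because load() returns the name of
+# the object it restored into the environment; a small helper would make the intent explicit
+# (sketch, not used here):
+#load.obj<-function(file) get(load(file))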
+# load all runs for the parcelas:
+run.rcp26.parcelas<-run.rcp85.parcelas<-list() # list with the parcelas data of each run and model for RCP 2.6 or RCP 8.5
+run.hist26.parcelas<-run.hist85.parcelas<-list() # list with the parcelas data of each run and model for the historical associated to RCP 2.6 or to RCP 8.5
+void.index.parcelas<-rep(0,n.parcelas)
+#void.run.parcelas<-list(void.index.parcelas,void.index.parcelas,void.index.parcelas,void.index.parcelas,void.index.parcelas,void.index.parcelas)
+void.run.parcelas<-rep(list(void.index.parcelas),n.indices)
+
+for(mod in 1:n.max.models){
+  run.rcp26.parcelas[[mod]]<-list()
+  run.hist26.parcelas[[mod]]<-list()
+  run.rcp85.parcelas[[mod]]<-list()
+  run.hist85.parcelas[[mod]]<-list()
+
+  # load all the rcp 2.6 data and its historical for the 4 parcelas:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp26[run,mod]!=0){
+      tmp<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/2.6/Indices_parcelas.RData")))
+      run.rcp26.parcelas[[mod]][[run]]<-list(tmp[,1],tmp[,2],tmp[,3],tmp[,4],tmp[,5],tmp[,6]) # create a list of six vectors, one for each index
+      tmph<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/historical/Indices_parcelas.RData")))
+      run.hist26.parcelas[[mod]][[run]]<-list(tmph[,1],tmph[,2],tmph[,3],tmph[,4],tmph[,5],tmph[,6]) # create a list of six vectors, one for each index
+    } else {
+      run.rcp26.parcelas[[mod]][[run]]<-void.run.parcelas
+      run.hist26.parcelas[[mod]][[run]]<-void.run.parcelas
+    }
+  }
+
+  # load all the rcp 8.5 data and its historical for the 4 parcelas:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp85[run,mod]!=0){
+      tmp<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/8.5/Indices_parcelas.RData")))
+      run.rcp85.parcelas[[mod]][[run]]<-list(tmp[,1],tmp[,2],tmp[,3],tmp[,4],tmp[,5],tmp[,6]) # create a list of six vectors, one for each index
+      tmph<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/historical/Indices_parcelas.RData")))
+      run.hist85.parcelas[[mod]][[run]]<-list(tmph[,1],tmph[,2],tmph[,3],tmph[,4],tmph[,5],tmph[,6]) # create a list of six vectors, one for each index
+    } else {
+      run.rcp85.parcelas[[mod]][[run]]<-void.run.parcelas
+      run.hist85.parcelas[[mod]][[run]]<-void.run.parcelas
+    }
+  }
+
+}
+
+# load all runs for the grid of parcelas:
+run.rcp26.grid.parcelas<-run.rcp85.grid.parcelas<-list() # list with the parcelas data of each run and model for RCP 2.6 or RCP 8.5
+run.hist26.grid.parcelas<-run.hist85.grid.parcelas<-list() # list with the parcelas data of each run and model for the historical associated to RCP 2.6 or to RCP 8.5
+void.index.grid.parcelas<-rep(0,n.grid.points)
+#void.run.grid.parcelas<-list(void.index.grid.parcelas,void.index.grid.parcelas,void.index.grid.parcelas,void.index.grid.parcelas,void.index.grid.parcelas,void.index.grid.parcelas)
+void.run.grid.parcelas<-rep(list(void.index.grid.parcelas),n.indices)
+
+for(mod in 1:n.max.models){
+  run.rcp26.grid.parcelas[[mod]]<-list()
+  run.hist26.grid.parcelas[[mod]]<-list()
+  run.rcp85.grid.parcelas[[mod]]<-list()
+  run.hist85.grid.parcelas[[mod]]<-list()
+
+  # load all the rcp 2.6 data and its historical for the grid of parcelas:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp26[run,mod]!=0){
+      tmp<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/2.6/Indices_grid_parcelas.RData")))
+      run.rcp26.grid.parcelas[[mod]][[run]]<-list(tmp[,1],tmp[,2],tmp[,3],tmp[,4],tmp[,5],tmp[,6]) # create a list of six vectors, one for each index
+      tmph<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/historical/Indices_grid_parcelas.RData")))
+      run.hist26.grid.parcelas[[mod]][[run]]<-list(tmph[,1],tmph[,2],tmph[,3],tmph[,4],tmph[,5],tmph[,6]) # create a list of six vectors, one for each index
+    } else {
+      run.rcp26.grid.parcelas[[mod]][[run]]<-void.run.grid.parcelas
+      run.hist26.grid.parcelas[[mod]][[run]]<-void.run.grid.parcelas
+    }
+  }
+
+  # load all the rcp 8.5 data and its historical for the grid of parcelas:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp85[run,mod]!=0){
+      tmp<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/8.5/Indices_grid_parcelas.RData")))
+      run.rcp85.grid.parcelas[[mod]][[run]]<-list(tmp[,1],tmp[,2],tmp[,3],tmp[,4],tmp[,5],tmp[,6]) # create a list of six vectors, one for each index
+      tmph<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/historical/Indices_grid_parcelas.RData")))
+      run.hist85.grid.parcelas[[mod]][[run]]<-list(tmph[,1],tmph[,2],tmph[,3],tmph[,4],tmph[,5],tmph[,6]) # create a list of six vectors, one for each index
+    } else {
+      run.rcp85.grid.parcelas[[mod]][[run]]<-void.run.grid.parcelas
+      run.hist85.grid.parcelas[[mod]][[run]]<-void.run.grid.parcelas
+    }
+  }
+
+}
+
+
+# load all yearly runs for the parcelas:
+run.rcp26.yearly.parcelas<-run.rcp85.yearly.parcelas<-run.trans26.yearly.parcelas<-list() # list with the parcelas data of each run and model for RCP 2.6 or RCP 8.5
+run.hist26.yearly.parcelas<-run.hist85.yearly.parcelas<-run.trans85.yearly.parcelas<-list() # list with the parcelas data of each run and model for the historical associated to RCP 2.6 or to RCP 8.5
+
+void.index.yearly.parcelas.rcp<-matrix(0,n.years.rcp,n.parcelas)
+void.index.yearly.parcelas.hist<-matrix(0,n.years.hist,n.parcelas)
+void.index.yearly.parcelas.trans<-matrix(0,n.years.trans,n.parcelas)
+
+void.run.yearly.parcelas.rcp<-rep(list(void.index.yearly.parcelas.rcp),n.indices)
+void.run.yearly.parcelas.hist<-rep(list(void.index.yearly.parcelas.hist),n.indices)
+void.run.yearly.parcelas.trans<-rep(list(void.index.yearly.parcelas.trans),n.indices)
+
+for(mod in 1:n.max.models){
+  run.rcp26.yearly.parcelas[[mod]]<-list()
+  run.hist26.yearly.parcelas[[mod]]<-list()
+  run.trans26.yearly.parcelas[[mod]]<-list()
+
+  run.rcp85.yearly.parcelas[[mod]]<-list()
+  run.hist85.yearly.parcelas[[mod]]<-list()
+  run.trans85.yearly.parcelas[[mod]]<-list()
+
+  # load all the rcp 2.6 data and its historical for the 4 parcelas:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp26[run,mod]!=0){
+      tmp<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/2.6/Indices_parcelas_anuales.RData")))
+      run.rcp26.yearly.parcelas[[mod]][[run]]<-list(tmp[1,,],tmp[2,,],tmp[3,,],tmp[4,,],tmp[5,,],tmp[6,,]) # create a list of six vectors, one for each index
+      tmph<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/historical/Indices_parcelas_anuales.RData")))
+      run.hist26.yearly.parcelas[[mod]][[run]]<-list(tmph[1,,],tmph[2,,],tmph[3,,],tmph[4,,],tmph[5,,],tmph[6,,]) # create a list of six vectors, one for each index
+      tmpr<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp26[run,mod],"/2.6/Indices_parcelas_anuales_transition.RData")))
+      run.trans26.yearly.parcelas[[mod]][[run]]<-list(tmpr[1,,],tmpr[2,,],tmpr[3,,],tmpr[4,,],tmpr[5,,],tmpr[6,,]) # create a list of six vectors, one for each index
+
+    } else {
+      run.rcp26.yearly.parcelas[[mod]][[run]]<-void.run.yearly.parcelas.rcp
+      run.hist26.yearly.parcelas[[mod]][[run]]<-void.run.yearly.parcelas.hist
+      run.trans26.yearly.parcelas[[mod]][[run]]<-void.run.yearly.parcelas.trans
+    }
+  }
+
+  # load all the rcp 8.5 data and its historical for the 4 parcelas:
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp85[run,mod]!=0){
+      tmp<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/8.5/Indices_parcelas_anuales.RData")))
+      run.rcp85.yearly.parcelas[[mod]][[run]]<-list(tmp[1,,],tmp[2,,],tmp[3,,],tmp[4,,],tmp[5,,],tmp[6,,]) # create a list of six vectors, one for each
index + tmph<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/historical/Indices_parcelas_anuales.RData"))) + run.hist85.yearly.parcelas[[mod]][[run]]<-list(tmph[1,,],tmph[2,,],tmph[3,,],tmph[4,,],tmph[5,,],tmph[6,,]) # create a list of six vectors, one for each index + tmpr<-get(load(paste0(path.data,"/",mod,"/r",n.runs.rcp85[run,mod],"/8.5/Indices_parcelas_anuales_transition.RData"))) + run.trans85.yearly.parcelas[[mod]][[run]]<-list(tmpr[1,,],tmpr[2,,],tmpr[3,,],tmpr[4,,],tmpr[5,,],tmpr[6,,]) # create a list of six vectors, one for each index + } else { + run.rcp85.yearly.parcelas[[mod]][[run]]<-void.run.yearly.parcelas.rcp + run.hist85.yearly.parcelas[[mod]][[run]]<-void.run.yearly.parcelas.hist + run.trans85.yearly.parcelas[[mod]][[run]]<-void.run.yearly.parcelas.trans + } + } + +} + + +# calculate the ensemble mean of each run in the same model: +ensemble.runs.rcp26<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist26<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.rcp85<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist85<-list() # list with the ensemble of all runs for one specific model + +for(mod in 1:n.max.models){ + if(ntot.runs.rcp26[mod]!=0){ + ensemble.runs.rcp26[[mod]] <-lapply(seq_along(run.rcp26[[mod]][[1]]),function(i) (matrix((unlist(run.rcp26[[mod]][[1]][i]) + unlist(run.rcp26[[mod]][[2]][i]) + unlist(run.rcp26[[mod]][[3]][i]) + unlist(run.rcp26[[mod]][[4]][i]) + unlist(run.rcp26[[mod]][[5]][i]))/ntot.runs.rcp26[mod],n.lat.rean,n.lon.rean))) + ensemble.runs.hist26[[mod]] <-lapply(seq_along(run.hist26[[mod]][[1]]),function(i) (matrix((unlist(run.hist26[[mod]][[1]][i]) + unlist(run.hist26[[mod]][[2]][i]) + unlist(run.hist26[[mod]][[3]][i]) + unlist(run.hist26[[mod]][[4]][i]) + unlist(run.hist26[[mod]][[5]][i]))/ntot.runs.rcp26[mod],n.lat.rean,n.lon.rean))) + } else { + ensemble.runs.rcp26[[mod]]<-void.run + ensemble.runs.hist26[[mod]]<-void.run + } + + if(ntot.runs.rcp85[mod]!=0){ + ensemble.runs.rcp85[[mod]] <-lapply(seq_along(run.rcp85[[mod]][[1]]),function(i) (matrix((unlist(run.rcp85[[mod]][[1]][i]) + unlist(run.rcp85[[mod]][[2]][i]) + unlist(run.rcp85[[mod]][[3]][i]) + unlist(run.rcp85[[mod]][[4]][i]) + unlist(run.rcp85[[mod]][[5]][i]))/ntot.runs.rcp85[mod],n.lat.rean,n.lon.rean))) + ensemble.runs.hist85[[mod]] <-lapply(seq_along(run.hist85[[mod]][[1]]),function(i) (matrix((unlist(run.hist85[[mod]][[1]][i]) + unlist(run.hist85[[mod]][[2]][i]) + unlist(run.hist85[[mod]][[3]][i]) + unlist(run.hist85[[mod]][[4]][i]) + unlist(run.hist85[[mod]][[5]][i]))/ntot.runs.rcp85[mod],n.lat.rean,n.lon.rean))) + } else { + ensemble.runs.rcp85[[mod]]<-void.run + ensemble.runs.hist85[[mod]]<-void.run + } +} + + +# calculate the ensemble mean of each run in the same model for the parcelas: +ensemble.runs.rcp26.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist26.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.rcp85.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist85.parcelas<-list() # list with the ensemble of all runs for one specific model + +for(mod in 1:n.max.models){ + if(ntot.runs.rcp26[mod]!=0){ + ensemble.runs.rcp26.parcelas[[mod]] <-lapply(seq_along(run.rcp26.parcelas[[mod]][[1]]),function(i) ((unlist(run.rcp26.parcelas[[mod]][[1]][i]) + unlist(run.rcp26.parcelas[[mod]][[2]][i]) + 
unlist(run.rcp26.parcelas[[mod]][[3]][i]) + unlist(run.rcp26.parcelas[[mod]][[4]][i]) + unlist(run.rcp26.parcelas[[mod]][[5]][i]))/ntot.runs.rcp26[mod])) + ensemble.runs.hist26.parcelas[[mod]] <-lapply(seq_along(run.hist26.parcelas[[mod]][[1]]),function(i) ((unlist(run.hist26.parcelas[[mod]][[1]][i]) + unlist(run.hist26.parcelas[[mod]][[2]][i]) + unlist(run.hist26.parcelas[[mod]][[3]][i]) + unlist(run.hist26.parcelas[[mod]][[4]][i]) + unlist(run.hist26.parcelas[[mod]][[5]][i]))/ntot.runs.rcp26[mod])) + } else { + ensemble.runs.rcp26.parcelas[[mod]]<-void.run.parcelas + ensemble.runs.hist26.parcelas[[mod]]<-void.run.parcelas + } + + if(ntot.runs.rcp85[mod]!=0){ + ensemble.runs.rcp85.parcelas[[mod]] <-lapply(seq_along(run.rcp85.parcelas[[mod]][[1]]),function(i) ((unlist(run.rcp85.parcelas[[mod]][[1]][i]) + unlist(run.rcp85.parcelas[[mod]][[2]][i]) + unlist(run.rcp85.parcelas[[mod]][[3]][i]) + unlist(run.rcp85.parcelas[[mod]][[4]][i]) + unlist(run.rcp85.parcelas[[mod]][[5]][i]))/ntot.runs.rcp85[mod])) + ensemble.runs.hist85.parcelas[[mod]] <-lapply(seq_along(run.hist85.parcelas[[mod]][[1]]),function(i) ((unlist(run.hist85.parcelas[[mod]][[1]][i]) + unlist(run.hist85.parcelas[[mod]][[2]][i]) + unlist(run.hist85.parcelas[[mod]][[3]][i]) + unlist(run.hist85.parcelas[[mod]][[4]][i]) + unlist(run.hist85.parcelas[[mod]][[5]][i]))/ntot.runs.rcp85[mod])) + } else { + ensemble.runs.rcp85.parcelas[[mod]]<-void.run.parcelas + ensemble.runs.hist85.parcelas[[mod]]<-void.run.parcelas + } +} + +# calculate the ensemble mean of each run in the same model for the grid of parcelas: +ensemble.runs.rcp26.grid.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist26.grid.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.rcp85.grid.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist85.grid.parcelas<-list() # list with the ensemble of all runs for one specific model + +for(mod in 1:n.max.models){ + if(ntot.runs.rcp26[mod]!=0){ + ensemble.runs.rcp26.grid.parcelas[[mod]] <-lapply(seq_along(run.rcp26.grid.parcelas[[mod]][[1]]),function(i) ((unlist(run.rcp26.grid.parcelas[[mod]][[1]][i]) + unlist(run.rcp26.grid.parcelas[[mod]][[2]][i]) + unlist(run.rcp26.grid.parcelas[[mod]][[3]][i]) + unlist(run.rcp26.grid.parcelas[[mod]][[4]][i]) + unlist(run.rcp26.grid.parcelas[[mod]][[5]][i]))/ntot.runs.rcp26[mod])) + ensemble.runs.hist26.grid.parcelas[[mod]] <-lapply(seq_along(run.hist26.grid.parcelas[[mod]][[1]]),function(i) ((unlist(run.hist26.grid.parcelas[[mod]][[1]][i]) + unlist(run.hist26.grid.parcelas[[mod]][[2]][i]) + unlist(run.hist26.grid.parcelas[[mod]][[3]][i]) + unlist(run.hist26.grid.parcelas[[mod]][[4]][i]) + unlist(run.hist26.grid.parcelas[[mod]][[5]][i]))/ntot.runs.rcp26[mod])) + } else { + ensemble.runs.rcp26.grid.parcelas[[mod]]<-void.run.grid.parcelas + ensemble.runs.hist26.grid.parcelas[[mod]]<-void.run.grid.parcelas + } + + if(ntot.runs.rcp85[mod]!=0){ + ensemble.runs.rcp85.grid.parcelas[[mod]] <-lapply(seq_along(run.rcp85.grid.parcelas[[mod]][[1]]),function(i) ((unlist(run.rcp85.grid.parcelas[[mod]][[1]][i]) + unlist(run.rcp85.grid.parcelas[[mod]][[2]][i]) + unlist(run.rcp85.grid.parcelas[[mod]][[3]][i]) + unlist(run.rcp85.grid.parcelas[[mod]][[4]][i]) + unlist(run.rcp85.grid.parcelas[[mod]][[5]][i]))/ntot.runs.rcp85[mod])) + ensemble.runs.hist85.grid.parcelas[[mod]] <-lapply(seq_along(run.hist85.grid.parcelas[[mod]][[1]]),function(i) 
((unlist(run.hist85.grid.parcelas[[mod]][[1]][i]) + unlist(run.hist85.grid.parcelas[[mod]][[2]][i]) + unlist(run.hist85.grid.parcelas[[mod]][[3]][i]) + unlist(run.hist85.grid.parcelas[[mod]][[4]][i]) + unlist(run.hist85.grid.parcelas[[mod]][[5]][i]))/ntot.runs.rcp85[mod])) + } else { + ensemble.runs.rcp85.grid.parcelas[[mod]]<-void.run.grid.parcelas + ensemble.runs.hist85.grid.parcelas[[mod]]<-void.run.grid.parcelas + } +} + +# calculate the ensemble mean of each yearly run in the same model for the parcelas: +ensemble.runs.rcp26.yearly.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist26.yearly.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.trans26.yearly.parcelas<-list() # list with the ensemble of all runs for one specific model + +ensemble.runs.rcp85.yearly.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.hist85.yearly.parcelas<-list() # list with the ensemble of all runs for one specific model +ensemble.runs.trans85.yearly.parcelas<-list() # list with the ensemble of all runs for one specific model + +for(mod in 1:n.max.models){ + if(ntot.runs.rcp26[mod]!=0){ + ensemble.runs.rcp26.yearly.parcelas[[mod]] <-lapply(seq_along(run.rcp26.yearly.parcelas[[mod]][[1]]),function(i) (matrix((unlist(run.rcp26.yearly.parcelas[[mod]][[1]][i]) + unlist(run.rcp26.yearly.parcelas[[mod]][[2]][i]) + unlist(run.rcp26.yearly.parcelas[[mod]][[3]][i]) + unlist(run.rcp26.yearly.parcelas[[mod]][[4]][i]) + unlist(run.rcp26.yearly.parcelas[[mod]][[5]][i]))/ntot.runs.rcp26[mod],n.years.rcp,n.parcelas))) + ensemble.runs.hist26.yearly.parcelas[[mod]] <-lapply(seq_along(run.hist26.yearly.parcelas[[mod]][[1]]),function(i) (matrix((unlist(run.hist26.yearly.parcelas[[mod]][[1]][i]) + unlist(run.hist26.yearly.parcelas[[mod]][[2]][i]) + unlist(run.hist26.yearly.parcelas[[mod]][[3]][i]) + unlist(run.hist26.yearly.parcelas[[mod]][[4]][i]) + unlist(run.hist26.yearly.parcelas[[mod]][[5]][i]))/ntot.runs.rcp26[mod],n.years.hist,n.parcelas))) + ensemble.runs.trans26.yearly.parcelas[[mod]] <-lapply(seq_along(run.trans26.yearly.parcelas[[mod]][[1]]),function(i) (matrix((unlist(run.trans26.yearly.parcelas[[mod]][[1]][i]) + unlist(run.trans26.yearly.parcelas[[mod]][[2]][i]) + unlist(run.trans26.yearly.parcelas[[mod]][[3]][i]) + unlist(run.trans26.yearly.parcelas[[mod]][[4]][i]) + unlist(run.trans26.yearly.parcelas[[mod]][[5]][i]))/ntot.runs.rcp26[mod],n.years.trans,n.parcelas))) + } else { + ensemble.runs.rcp26.yearly.parcelas[[mod]]<-void.run.yearly.parcelas.rcp + ensemble.runs.hist26.yearly.parcelas[[mod]]<-void.run.yearly.parcelas.hist + ensemble.runs.trans26.yearly.parcelas[[mod]]<-void.run.yearly.parcelas.trans + } + + if(ntot.runs.rcp85[mod]!=0){ + ensemble.runs.rcp85.yearly.parcelas[[mod]] <-lapply(seq_along(run.rcp85.yearly.parcelas[[mod]][[1]]),function(i) (matrix((unlist(run.rcp85.yearly.parcelas[[mod]][[1]][i]) + unlist(run.rcp85.yearly.parcelas[[mod]][[2]][i]) + unlist(run.rcp85.yearly.parcelas[[mod]][[3]][i]) + unlist(run.rcp85.yearly.parcelas[[mod]][[4]][i]) + unlist(run.rcp85.yearly.parcelas[[mod]][[5]][i]))/ntot.runs.rcp85[mod],n.years.rcp,n.parcelas))) + ensemble.runs.hist85.yearly.parcelas[[mod]] <-lapply(seq_along(run.hist85.yearly.parcelas[[mod]][[1]]),function(i) (matrix((unlist(run.hist85.yearly.parcelas[[mod]][[1]][i]) + unlist(run.hist85.yearly.parcelas[[mod]][[2]][i]) + unlist(run.hist85.yearly.parcelas[[mod]][[3]][i]) + unlist(run.hist85.yearly.parcelas[[mod]][[4]][i]) + 
unlist(run.hist85.yearly.parcelas[[mod]][[5]][i]))/ntot.runs.rcp85[mod],n.years.hist,n.parcelas)))
+    ensemble.runs.trans85.yearly.parcelas[[mod]] <-lapply(seq_along(run.trans85.yearly.parcelas[[mod]][[1]]),function(i) (matrix((unlist(run.trans85.yearly.parcelas[[mod]][[1]][i]) + unlist(run.trans85.yearly.parcelas[[mod]][[2]][i]) + unlist(run.trans85.yearly.parcelas[[mod]][[3]][i]) + unlist(run.trans85.yearly.parcelas[[mod]][[4]][i]) + unlist(run.trans85.yearly.parcelas[[mod]][[5]][i]))/ntot.runs.rcp85[mod],n.years.trans,n.parcelas)))
+  } else {
+    ensemble.runs.rcp85.yearly.parcelas[[mod]]<-void.run.yearly.parcelas.rcp
+    ensemble.runs.hist85.yearly.parcelas[[mod]]<-void.run.yearly.parcelas.hist
+    ensemble.runs.trans85.yearly.parcelas[[mod]]<-void.run.yearly.parcelas.trans
+  }
+}
+
+
+# calculate the percentiles of the delta RCP26 - Hist26 for the parcelas:
+percentil75.delta.rcp26.parcelas<-percentil25.delta.rcp26.parcelas<-matrix(NA,n.indices,n.parcelas)
+
+for(ind in 1:n.indices){
+  tmp<-matrix(NA,n.max.models*n.max.runs,n.parcelas)
+  for(mod in 1:n.max.models){
+    for(run in 1:n.max.runs){
+      if(n.runs.rcp26[run,mod]!=0){
+        tmp[(mod-1)*n.max.runs+run,] <- run.rcp26.parcelas[[mod]][[run]][[ind]] - run.hist26.parcelas[[mod]][[run]][[ind]]
+      }
+    }
+  }
+  percentil75.delta.rcp26.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.75)
+  percentil25.delta.rcp26.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.25)
+}
+
+# calculate the percentiles of the delta RCP85 - Hist85 for the parcelas:
+percentil75.delta.rcp85.parcelas<-percentil25.delta.rcp85.parcelas<-matrix(NA,n.indices,n.parcelas)
+
+for(ind in 1:n.indices){
+  tmp<-matrix(NA,n.max.models*n.max.runs,n.parcelas)
+  for(mod in 1:n.max.models){
+    for(run in 1:n.max.runs){
+      if(n.runs.rcp85[run,mod]!=0){
+        tmp[(mod-1)*n.max.runs+run,] <- run.rcp85.parcelas[[mod]][[run]][[ind]] - run.hist85.parcelas[[mod]][[run]][[ind]]
+      }
+    }
+  }
+  percentil75.delta.rcp85.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.75)
+  percentil25.delta.rcp85.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.25)
+}
+
+# calculate the percentiles of the delta RCP26 - Hist26 for the grid of parcelas:
+percentil75.delta.rcp26.grid.parcelas<-percentil25.delta.rcp26.grid.parcelas<-matrix(NA,n.indices,n.grid.points)
+
+for(ind in 1:n.indices){
+  tmp<-matrix(NA,n.max.models*n.max.runs,n.grid.points)
+  for(mod in 1:n.max.models){
+    for(run in 1:n.max.runs){
+      if(n.runs.rcp26[run,mod]!=0){
+        tmp[(mod-1)*n.max.runs+run,] <- run.rcp26.grid.parcelas[[mod]][[run]][[ind]] - run.hist26.grid.parcelas[[mod]][[run]][[ind]]
+      }
+    }
+  }
+  percentil75.delta.rcp26.grid.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.75)
+  percentil25.delta.rcp26.grid.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.25)
+}
+
+# calculate the percentiles of the delta RCP85 - Hist85 for the grid of parcelas:
+percentil75.delta.rcp85.grid.parcelas<-percentil25.delta.rcp85.grid.parcelas<-matrix(NA,n.indices,n.grid.points)
+
+for(ind in 1:n.indices){
+  tmp<-matrix(NA,n.max.models*n.max.runs,n.grid.points)
+  for(mod in 1:n.max.models){
+    for(run in 1:n.max.runs){
+      if(n.runs.rcp85[run,mod]!=0){
+        tmp[(mod-1)*n.max.runs+run,] <- run.rcp85.grid.parcelas[[mod]][[run]][[ind]] - run.hist85.grid.parcelas[[mod]][[run]][[ind]]
+      }
+    }
+  }
+  percentil75.delta.rcp85.grid.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.75)
+  percentil25.delta.rcp85.grid.parcelas[ind,] <- apply(tmp,2,quantile,na.rm=TRUE,probs=0.25)
+}
+
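+# the four multimodel means below add the 15 models by hand (models without runs contribute
+# the zero matrices of void.run, so they do not bias the sum before dividing by the number of
+# models really used); a generic Reduce-based alternative, in the spirit of the commented
+# suggestion at the end of this block (sketch):
+#multimodel.mean<-function(runs,ntot,n.lat,n.lon){
+#   lapply(seq_along(runs[[1]]),function(i) matrix(Reduce(`+`,lapply(runs,function(r) unlist(r[i])))/ntot,n.lat,n.lon))
+#}
+# e.g.: ensemble.hist26<-multimodel.mean(ensemble.runs.hist26,ntot.models.rcp26,n.lat.rean,n.lon.rean)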
+# calculate the ensemble mean of all models:
+ensemble.hist26<-lapply(seq_along(ensemble.runs.hist26[[1]]),function(i) (matrix((unlist(ensemble.runs.hist26[[1]][i]) + unlist(ensemble.runs.hist26[[2]][i]) + unlist(ensemble.runs.hist26[[3]][i]) + unlist(ensemble.runs.hist26[[4]][i]) + unlist(ensemble.runs.hist26[[5]][i]) + unlist(ensemble.runs.hist26[[6]][i]) + unlist(ensemble.runs.hist26[[7]][i]) + unlist(ensemble.runs.hist26[[8]][i]) + unlist(ensemble.runs.hist26[[9]][i]) + unlist(ensemble.runs.hist26[[10]][i]) + unlist(ensemble.runs.hist26[[11]][i]) + unlist(ensemble.runs.hist26[[12]][i]) + unlist(ensemble.runs.hist26[[13]][i]) + unlist(ensemble.runs.hist26[[14]][i]) + unlist(ensemble.runs.hist26[[15]][i]))/ntot.models.rcp26,n.lat.rean,n.lon.rean)))
+
+ensemble.hist85<-lapply(seq_along(ensemble.runs.hist85[[1]]),function(i) (matrix((unlist(ensemble.runs.hist85[[1]][i]) + unlist(ensemble.runs.hist85[[2]][i]) + unlist(ensemble.runs.hist85[[3]][i]) + unlist(ensemble.runs.hist85[[4]][i]) + unlist(ensemble.runs.hist85[[5]][i]) + unlist(ensemble.runs.hist85[[6]][i]) + unlist(ensemble.runs.hist85[[7]][i]) + unlist(ensemble.runs.hist85[[8]][i]) + unlist(ensemble.runs.hist85[[9]][i]) + unlist(ensemble.runs.hist85[[10]][i]) + unlist(ensemble.runs.hist85[[11]][i]) + unlist(ensemble.runs.hist85[[12]][i]) + unlist(ensemble.runs.hist85[[13]][i]) + unlist(ensemble.runs.hist85[[14]][i]) + unlist(ensemble.runs.hist85[[15]][i]))/ntot.models.rcp85,n.lat.rean,n.lon.rean)))
+
+ensemble.rcp26<-lapply(seq_along(ensemble.runs.rcp26[[1]]),function(i) (matrix((unlist(ensemble.runs.rcp26[[1]][i]) + unlist(ensemble.runs.rcp26[[2]][i]) + unlist(ensemble.runs.rcp26[[3]][i]) + unlist(ensemble.runs.rcp26[[4]][i]) + unlist(ensemble.runs.rcp26[[5]][i]) + unlist(ensemble.runs.rcp26[[6]][i]) + unlist(ensemble.runs.rcp26[[7]][i]) + unlist(ensemble.runs.rcp26[[8]][i]) + unlist(ensemble.runs.rcp26[[9]][i]) + unlist(ensemble.runs.rcp26[[10]][i]) + unlist(ensemble.runs.rcp26[[11]][i]) + unlist(ensemble.runs.rcp26[[12]][i]) + unlist(ensemble.runs.rcp26[[13]][i]) + unlist(ensemble.runs.rcp26[[14]][i]) + unlist(ensemble.runs.rcp26[[15]][i]))/ntot.models.rcp26,n.lat.rean,n.lon.rean)))
+
+ensemble.rcp85<-lapply(seq_along(ensemble.runs.rcp85[[1]]),function(i) (matrix((unlist(ensemble.runs.rcp85[[1]][i]) + unlist(ensemble.runs.rcp85[[2]][i]) + unlist(ensemble.runs.rcp85[[3]][i]) + unlist(ensemble.runs.rcp85[[4]][i]) + unlist(ensemble.runs.rcp85[[5]][i]) + unlist(ensemble.runs.rcp85[[6]][i]) + unlist(ensemble.runs.rcp85[[7]][i]) + unlist(ensemble.runs.rcp85[[8]][i]) + unlist(ensemble.runs.rcp85[[9]][i]) + unlist(ensemble.runs.rcp85[[10]][i]) + unlist(ensemble.runs.rcp85[[11]][i]) + unlist(ensemble.runs.rcp85[[12]][i]) + unlist(ensemble.runs.rcp85[[13]][i]) + unlist(ensemble.runs.rcp85[[14]][i]) + unlist(ensemble.runs.rcp85[[15]][i]))/ntot.models.rcp85,n.lat.rean,n.lon.rean)))
+
+## in the future you can replace the four above calculations with a function like this:
+# multimodel.ensemble<-function(multirun=list(),ntot.models,n.lat,n.lon){
+#   multimodel<-lapply(seq_along(multirun[[1]]),function(i) (matrix((unlist(multirun[[1]][i]) + unlist(multirun[[2]][i]) + unlist(multirun[[3]][i]) + unlist(multirun[[4]][i]) + unlist(multirun[[5]][i]) + unlist(multirun[[6]][i]) + unlist(multirun[[7]][i]) + unlist(multirun[[8]][i]) + unlist(multirun[[9]][i]) + unlist(multirun[[10]][i]) + unlist(multirun[[11]][i]) + unlist(multirun[[12]][i]) + unlist(multirun[[13]][i]) + unlist(multirun[[14]][i]) + unlist(multirun[[15]][i]))/ntot.models,n.lat,n.lon)))
+#   return(multimodel)
+#}
+
+#
calculate the ensemble mean of all models for the parcelas: +ensemble.hist26.parcelas<-lapply(seq_along(ensemble.runs.hist26.parcelas[[1]]),function(i) ((unlist(ensemble.runs.hist26.parcelas[[1]][i]) + unlist(ensemble.runs.hist26.parcelas[[2]][i]) + unlist(ensemble.runs.hist26.parcelas[[3]][i]) + unlist(ensemble.runs.hist26.parcelas[[4]][i]) + unlist(ensemble.runs.hist26.parcelas[[5]][i]) + unlist(ensemble.runs.hist26.parcelas[[6]][i]) + unlist(ensemble.runs.hist26.parcelas[[7]][i]) + unlist(ensemble.runs.hist26.parcelas[[8]][i]) + unlist(ensemble.runs.hist26.parcelas[[9]][i]) + unlist(ensemble.runs.hist26.parcelas[[10]][i]) + unlist(ensemble.runs.hist26.parcelas[[11]][i]) + unlist(ensemble.runs.hist26.parcelas[[12]][i]) + unlist(ensemble.runs.hist26.parcelas[[13]][i]) + unlist(ensemble.runs.hist26.parcelas[[14]][i]) + unlist(ensemble.runs.hist26.parcelas[[15]][i]))/ntot.models.rcp26)) + +ensemble.hist85.parcelas<-lapply(seq_along(ensemble.runs.hist85.parcelas[[1]]),function(i) ((unlist(ensemble.runs.hist85.parcelas[[1]][i]) + unlist(ensemble.runs.hist85.parcelas[[2]][i]) + unlist(ensemble.runs.hist85.parcelas[[3]][i]) + unlist(ensemble.runs.hist85.parcelas[[4]][i]) + unlist(ensemble.runs.hist85.parcelas[[5]][i]) + unlist(ensemble.runs.hist85.parcelas[[6]][i]) + unlist(ensemble.runs.hist85.parcelas[[7]][i]) + unlist(ensemble.runs.hist85.parcelas[[8]][i]) + unlist(ensemble.runs.hist85.parcelas[[9]][i]) + unlist(ensemble.runs.hist85.parcelas[[10]][i]) + unlist(ensemble.runs.hist85.parcelas[[11]][i]) + unlist(ensemble.runs.hist85.parcelas[[12]][i]) + unlist(ensemble.runs.hist85.parcelas[[13]][i]) + unlist(ensemble.runs.hist85.parcelas[[14]][i]) + unlist(ensemble.runs.hist85.parcelas[[15]][i]))/ntot.models.rcp85)) + +ensemble.rcp26.parcelas<-lapply(seq_along(ensemble.runs.rcp26.parcelas[[1]]),function(i) ((unlist(ensemble.runs.rcp26.parcelas[[1]][i]) + unlist(ensemble.runs.rcp26.parcelas[[2]][i]) + unlist(ensemble.runs.rcp26.parcelas[[3]][i]) + unlist(ensemble.runs.rcp26.parcelas[[4]][i]) + unlist(ensemble.runs.rcp26.parcelas[[5]][i]) + unlist(ensemble.runs.rcp26.parcelas[[6]][i]) + unlist(ensemble.runs.rcp26.parcelas[[7]][i]) + unlist(ensemble.runs.rcp26.parcelas[[8]][i]) + unlist(ensemble.runs.rcp26.parcelas[[9]][i]) + unlist(ensemble.runs.rcp26.parcelas[[10]][i]) + unlist(ensemble.runs.rcp26.parcelas[[11]][i]) + unlist(ensemble.runs.rcp26.parcelas[[12]][i]) + unlist(ensemble.runs.rcp26.parcelas[[13]][i]) + unlist(ensemble.runs.rcp26.parcelas[[14]][i]) + unlist(ensemble.runs.rcp26.parcelas[[15]][i]))/ntot.models.rcp26)) + +ensemble.rcp85.parcelas<-lapply(seq_along(ensemble.runs.rcp85.parcelas[[1]]),function(i) ((unlist(ensemble.runs.rcp85.parcelas[[1]][i]) + unlist(ensemble.runs.rcp85.parcelas[[2]][i]) + unlist(ensemble.runs.rcp85.parcelas[[3]][i]) + unlist(ensemble.runs.rcp85.parcelas[[4]][i]) + unlist(ensemble.runs.rcp85.parcelas[[5]][i]) + unlist(ensemble.runs.rcp85.parcelas[[6]][i]) + unlist(ensemble.runs.rcp85.parcelas[[7]][i]) + unlist(ensemble.runs.rcp85.parcelas[[8]][i]) + unlist(ensemble.runs.rcp85.parcelas[[9]][i]) + unlist(ensemble.runs.rcp85.parcelas[[10]][i]) + unlist(ensemble.runs.rcp85.parcelas[[11]][i]) + unlist(ensemble.runs.rcp85.parcelas[[12]][i]) + unlist(ensemble.runs.rcp85.parcelas[[13]][i]) + unlist(ensemble.runs.rcp85.parcelas[[14]][i]) + unlist(ensemble.runs.rcp85.parcelas[[15]][i]))/ntot.models.rcp85)) + +# calculate the ensemble mean of all models for the grid of parcelas: 
+ensemble.hist26.grid.parcelas<-lapply(seq_along(ensemble.runs.hist26.grid.parcelas[[1]]),function(i) ((unlist(ensemble.runs.hist26.grid.parcelas[[1]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[2]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[3]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[4]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[5]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[6]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[7]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[8]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[9]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[10]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[11]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[12]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[13]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[14]][i]) + unlist(ensemble.runs.hist26.grid.parcelas[[15]][i]))/ntot.models.rcp26)) + +ensemble.hist85.grid.parcelas<-lapply(seq_along(ensemble.runs.hist85.grid.parcelas[[1]]),function(i) ((unlist(ensemble.runs.hist85.grid.parcelas[[1]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[2]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[3]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[4]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[5]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[6]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[7]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[8]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[9]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[10]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[11]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[12]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[13]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[14]][i]) + unlist(ensemble.runs.hist85.grid.parcelas[[15]][i]))/ntot.models.rcp85)) + +ensemble.rcp26.grid.parcelas<-lapply(seq_along(ensemble.runs.rcp26.grid.parcelas[[1]]),function(i) ((unlist(ensemble.runs.rcp26.grid.parcelas[[1]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[2]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[3]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[4]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[5]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[6]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[7]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[8]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[9]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[10]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[11]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[12]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[13]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[14]][i]) + unlist(ensemble.runs.rcp26.grid.parcelas[[15]][i]))/ntot.models.rcp26)) + +ensemble.rcp85.grid.parcelas<-lapply(seq_along(ensemble.runs.rcp85.grid.parcelas[[1]]),function(i) ((unlist(ensemble.runs.rcp85.grid.parcelas[[1]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[2]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[3]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[4]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[5]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[6]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[7]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[8]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[9]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[10]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[11]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[12]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[13]][i]) + 
unlist(ensemble.runs.rcp85.grid.parcelas[[14]][i]) + unlist(ensemble.runs.rcp85.grid.parcelas[[15]][i]))/ntot.models.rcp85)) + +# calculate the ensemble mean of all models for the yearly parcelas: +ensemble.hist26.yearly.parcelas<-lapply(seq_along(ensemble.runs.hist26.yearly.parcelas[[1]]),function(i) (matrix((unlist(ensemble.runs.hist26.yearly.parcelas[[1]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[2]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[3]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[4]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[5]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[6]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[7]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[8]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[9]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[10]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[11]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[12]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[13]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[14]][i]) + unlist(ensemble.runs.hist26.yearly.parcelas[[15]][i]))/ntot.models.rcp26,n.years.hist,n.parcelas))) + +ensemble.hist85.yearly.parcelas<-lapply(seq_along(ensemble.runs.hist85.yearly.parcelas[[1]]),function(i) (matrix((unlist(ensemble.runs.hist85.yearly.parcelas[[1]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[2]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[3]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[4]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[5]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[6]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[7]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[8]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[9]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[10]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[11]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[12]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[13]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[14]][i]) + unlist(ensemble.runs.hist85.yearly.parcelas[[15]][i]))/ntot.models.rcp85,n.years.hist,n.parcelas))) + +ensemble.rcp26.yearly.parcelas<-lapply(seq_along(ensemble.runs.rcp26.yearly.parcelas[[1]]),function(i) (matrix((unlist(ensemble.runs.rcp26.yearly.parcelas[[1]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[2]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[3]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[4]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[5]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[6]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[7]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[8]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[9]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[10]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[11]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[12]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[13]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[14]][i]) + unlist(ensemble.runs.rcp26.yearly.parcelas[[15]][i]))/ntot.models.rcp26,n.years.rcp,n.parcelas))) + +ensemble.rcp85.yearly.parcelas<-lapply(seq_along(ensemble.runs.rcp85.yearly.parcelas[[1]]),function(i) (matrix((unlist(ensemble.runs.rcp85.yearly.parcelas[[1]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[2]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[3]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[4]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[5]][i]) + 
unlist(ensemble.runs.rcp85.yearly.parcelas[[6]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[7]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[8]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[9]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[10]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[11]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[12]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[13]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[14]][i]) + unlist(ensemble.runs.rcp85.yearly.parcelas[[15]][i]))/ntot.models.rcp85,n.years.rcp,n.parcelas))) + +ensemble.trans26.yearly.parcelas<-lapply(seq_along(ensemble.runs.trans26.yearly.parcelas[[1]]),function(i) (matrix((unlist(ensemble.runs.trans26.yearly.parcelas[[1]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[2]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[3]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[4]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[5]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[6]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[7]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[8]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[9]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[10]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[11]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[12]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[13]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[14]][i]) + unlist(ensemble.runs.trans26.yearly.parcelas[[15]][i]))/ntot.models.rcp26,n.years.trans,n.parcelas))) + +ensemble.trans85.yearly.parcelas<-lapply(seq_along(ensemble.runs.trans85.yearly.parcelas[[1]]),function(i) (matrix((unlist(ensemble.runs.trans85.yearly.parcelas[[1]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[2]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[3]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[4]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[5]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[6]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[7]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[8]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[9]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[10]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[11]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[12]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[13]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[14]][i]) + unlist(ensemble.runs.trans85.yearly.parcelas[[15]][i]))/ntot.models.rcp85,n.years.trans,n.parcelas))) + +# calculate the delta for the parcelas: +ensemble.delta26.parcelas <- lapply(seq_along(ensemble.runs.rcp26.parcelas[[1]]),function(i) (unlist(ensemble.rcp26.parcelas[i]) - unlist(ensemble.hist26.parcelas[i]))) +ensemble.delta85.parcelas <- lapply(seq_along(ensemble.runs.rcp85.parcelas[[1]]),function(i) (unlist(ensemble.rcp85.parcelas[i]) - unlist(ensemble.hist85.parcelas[i]))) + +# calculate the delta for the grid of parcelas: +ensemble.delta26.grid.parcelas <- lapply(seq_along(ensemble.runs.rcp26.grid.parcelas[[1]]),function(i) (unlist(ensemble.rcp26.grid.parcelas[i]) - unlist(ensemble.hist26.grid.parcelas[i]))) +ensemble.delta85.grid.parcelas <- lapply(seq_along(ensemble.runs.rcp85.grid.parcelas[[1]]),function(i) (unlist(ensemble.rcp85.grid.parcelas[i]) - unlist(ensemble.hist85.grid.parcelas[i]))) + +# save the values for the grid of parcelas: +my.lat.lon.grid<-cbind(lat=grid.lat,lon=grid.lon) 
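+## quick sanity check of the helper above (a sketch, commented out; it only asserts the expected
+## shapes, which depend on the settings defined earlier in this script):
+#stopifnot(length(ensemble.rcp26) == n.indices,
+#          all(dim(ensemble.rcp26[[1]]) == c(n.lat.rean,n.lon.rean)),
+#          all(dim(ensemble.rcp85.yearly.parcelas[[1]]) == c(n.years.rcp,n.parcelas)))
+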
+# save the values for the grid of parcelas:
+my.lat.lon.grid<-cbind(lat=grid.lat,lon=grid.lon)
+my.grid.indices.rean<-as.data.frame(grid.indices.rean);names(my.grid.indices.rean)<-paste("Obs",index.name.very.short,sep="_")
+my.ensemble.delta26.grid.parcelas<-as.data.frame(matrix(unlist(ensemble.delta26.grid.parcelas),n.grid.points,n.indices));names(my.ensemble.delta26.grid.parcelas)<-paste("MM26",index.name.very.short,sep="_")
+my.ensemble.delta85.grid.parcelas<-as.data.frame(matrix(unlist(ensemble.delta85.grid.parcelas),n.grid.points,n.indices));names(my.ensemble.delta85.grid.parcelas)<-paste("MM85",index.name.very.short,sep="_")
+my.percentil25.delta.rcp26.grid.parcelas<-as.data.frame(t(percentil25.delta.rcp26.grid.parcelas));names(my.percentil25.delta.rcp26.grid.parcelas)<-paste("MM26_p25",index.name.very.short,sep="_")
+my.percentil75.delta.rcp26.grid.parcelas<-as.data.frame(t(percentil75.delta.rcp26.grid.parcelas));names(my.percentil75.delta.rcp26.grid.parcelas)<-paste("MM26_p75",index.name.very.short,sep="_")
+my.percentil25.delta.rcp85.grid.parcelas<-as.data.frame(t(percentil25.delta.rcp85.grid.parcelas));names(my.percentil25.delta.rcp85.grid.parcelas)<-paste("MM85_p25",index.name.very.short,sep="_")
+my.percentil75.delta.rcp85.grid.parcelas<-as.data.frame(t(percentil75.delta.rcp85.grid.parcelas));names(my.percentil75.delta.rcp85.grid.parcelas)<-paste("MM85_p75",index.name.very.short,sep="_")
+my.output.arcgis<-cbind(my.lat.lon.grid,my.grid.indices.rean,my.ensemble.delta26.grid.parcelas,my.percentil25.delta.rcp26.grid.parcelas,my.percentil75.delta.rcp26.grid.parcelas,my.ensemble.delta85.grid.parcelas,my.percentil25.delta.rcp85.grid.parcelas,my.percentil75.delta.rcp85.grid.parcelas)
+
+write.table(my.output.arcgis,file=paste0(path.data,"/input_gis.txt"), row.names=FALSE)
+
+# Maps of the ensemble of the Historical for RCP 2.6 (for each index):
+ for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_Historical26_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   my.toptitle<-paste0(index.name[my.index]," (Ensemble Historical for RCP 2.6)")
+   #if(models.firstdim=="lat") {contour.data<-ensemble.hist26[[my.index]]} else {contour.data<-t(ensemble.hist26[[my.index]])}
+   contour.data<-ensemble.hist26[[my.index]] # since the Ensemble always starts with the lat
+
+   PlotEquiMap2(ensemble.hist26[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+ }
+
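+ ## note on the map blocks above and below: layout() reserves the last panel for the colour scale,
+ ## which is why PlotEquiMap2 is called with drawleg=F and ColorBar() is drawn separately afterwards.
+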
+ # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_Historical26_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   my.toptitle<-paste0(index.name.short[my.index]," (Ensemble Historical for RCP 2.6)")
+
+   # to solve some bugs of PlotEquiMap:
+   # if(models.firstdim=="lat") {plot.data<-ensemble.hist26[[my.index]][my.lon.pos,my.lat.pos] } else {plot.data<-ensemble.hist26[[my.index]][my.lat.pos,my.lon.pos]}
+   plot.data<-ensemble.hist26[[my.index]][my.lat.pos,my.lon.pos] # since the Ensemble data always starts with the lat
+
+   layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+   #layout.show(2)
+   par(oma=c(1,0,0,0.5))
+   #par(mfrow=c(2,1))
+   #x11(width=4,height=.8)
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=4,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,my.subsamplegbar[[my.index]])
+   dev.off()
+ }
+
+
+# Maps of the ensemble of the Historical for RCP 8.5 (for each index):
+ for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_interp_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_Historical85_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   my.toptitle<-paste0(index.name[my.index]," (Ensemble Historical for RCP 8.5)")
+   contour.data<-ensemble.hist85[[my.index]] # since the Ensemble always starts with the lat
+
+   PlotEquiMap2(ensemble.hist85[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+ }
+
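+## the Historical 2.6 and 8.5 map blocks (and the RCP blocks further below) are identical except
+## for the ensemble list and the scenario label; in the future they could be wrapped in a single
+## plotting function, e.g. (hypothetical name): plot.ensemble.maps(ensemble.hist85,"Historical for RCP 8.5")
+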
+ # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ if(length(my.lat.pos)<=1) my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin-2) # to plot at least 2 grid points for lat and 2 grid points for lon
+ if(length(my.lon.pos)<=1) my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin-2)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+   #png(filename=paste0(outputdir,"/indices_historical/",models.hist[mod],"_run_",run,"_interp_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_Historical85_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+   my.toptitle<-paste0(index.name.short[my.index]," (Ensemble Historical for RCP 8.5)")
+
+   # to solve some bugs of PlotEquiMap:
+   plot.data<-ensemble.hist85[[my.index]][my.lat.pos,my.lon.pos] # since the Ensemble data always starts with the lat
+
+   layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+   #layout.show(2)
+   par(oma=c(1,0,0,0.5))
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=4,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,my.subsamplegbar[[my.index]])
+   dev.off()
+ }
+
+
+### calculate the weighted ensemble of the historical runs of RCP 2.6, using the inverse of the RMSE as weight:
+my.lat.pos.zoom<-my.lat.pos[zoom.lat.min:zoom.lat.max]
+my.lon.pos.zoom<-my.lon.pos[zoom.lon.min:zoom.lon.max]
+
+# first, calculate the RMSE of each run against the reanalysis over the zoom region:
+rmse26<-array(NA,c(n.max.runs,n.max.models,n.indices))
+for(mod in 1:n.max.models){
+  for(run in 1:n.max.runs){
+    if(n.runs.rcp26[run,mod] != 0){
+      for(ind in 1:n.indices){
+        rmse26[run,mod,ind]<-RMSE(as.vector(index.rean[[ind]][my.lat.pos.zoom,my.lon.pos.zoom]),as.vector(run.hist26[[mod]][[run]][[ind]][my.lat.pos.zoom,my.lon.pos.zoom]))
+      }
+    }
+  }
+}
+
+# then, calculate the RMSE of each model as the mean RMSE of its runs:
+rmse26.ensemble<-apply(rmse26,c(2,3),mean,na.rm=T) # in the rows of rmse.ensemble there are the models and in the columns the indices
+
+## alternative way to calculate the weights of each model:
+#rmse26.ensemble<-array(NA,c(n.max.models,n.indices))
+#for(mod in 1:n.max.models){
+#  if(ntot.runs.rcp26[mod] != 0){
+#    for(ind in 1:n.indices){
+#      rmse26.ensemble[mod,ind]<-RMSE(as.vector(index.rean[[ind]][my.lat.pos.zoom,my.lon.pos.zoom]),as.vector(ensemble.runs.hist26[[mod]][[ind]][my.lat.pos.zoom,my.lon.pos.zoom]))
+#    }
+#  }
+#}
+
+# weight calculation as the inverse of the rmse:
+weight26<-1/rmse26.ensemble
+ss<-which(is.na(rmse26.ensemble))
+if(length(ss)>0) weight26[ss]<-0 # if there are models with no runs we must put them to 0 weight
+sum.weight26<-colSums(weight26)
+
+# finally, calculate the weighted ensemble:
+ensemble.hist26.weighted<-lapply(seq_along(ensemble.runs.hist26[[1]]),function(i) (matrix((unlist(ensemble.runs.hist26[[1]][i])*weight26[1,i] + unlist(ensemble.runs.hist26[[2]][i])*weight26[2,i] +
unlist(ensemble.runs.hist26[[3]][i])*weight26[3,i] + unlist(ensemble.runs.hist26[[4]][i])*weight26[4,i] + unlist(ensemble.runs.hist26[[5]][i])*weight26[5,i] + unlist(ensemble.runs.hist26[[6]][i])*weight26[6,i] + unlist(ensemble.runs.hist26[[7]][i])*weight26[7,i] + unlist(ensemble.runs.hist26[[8]][i])*weight26[8,i] + unlist(ensemble.runs.hist26[[9]][i])*weight26[9,i] + unlist(ensemble.runs.hist26[[10]][i])*weight26[10,i] + unlist(ensemble.runs.hist26[[11]][i])*weight26[11,i] + unlist(ensemble.runs.hist26[[12]][i])*weight26[12,i] + unlist(ensemble.runs.hist26[[13]][i])*weight26[13,i] + unlist(ensemble.runs.hist26[[14]][i])*weight26[14,i] + unlist(ensemble.runs.hist26[[15]][i])*weight26[15,i])/sum.weight26[i],n.lat.rean,n.lon.rean))) + +### calculate the ensemble weighted of the historical of the RCP 8.5 with the inverse of the RMSE: +my.lat.pos.zoom<-my.lat.pos[zoom.lat.min:zoom.lat.max] +my.lon.pos.zoom<-my.lon.pos[zoom.lon.min:zoom.lon.max] + +# first, calculate the weights of each run: +rmse85<-array(NA,c(n.max.runs,n.max.models,n.indices)) +for(mod in 1:n.max.models){ + for(run in 1:n.max.runs){ + if(n.runs.rcp85[run,mod] != 0){ + for(ind in 1:n.indices){ + rmse85[run,mod,ind]<-RMSE(as.vector(index.rean[[ind]][my.lat.pos.zoom,my.lon.pos.zoom]),as.vector(run.hist85[[mod]][[run]][[ind]][my.lat.pos.zoom,my.lon.pos.zoom])) + } + } + } + } + +# then, calculate the weights of each model as the mean of the weights of its runs: +rmse85.ensemble<-apply(rmse85,c(2,3),mean,na.rm=T) # in the rows of rmse.ensemble there are the models and in the columns the indices + +## alternative way to calculate the weights of each model: +#rmse85.ensemble<-array(NA,c(n.max.models,n.indices)) +#for(mod in 1:n.max.models){ +# if(ntot.runs.rcp85[mod] != 0){ +# for(ind in 1:n.indices){ +# rmse85.ensemble[mod,ind]<-RMSE(as.vector(index.rean[[ind]][my.lat.pos.zoom,my.lon.pos.zoom]),as.vector(ensemble.runs.hist85[[mod]][[ind]][my.lat.pos.zoom,my.lon.pos.zoom])) +# } +# } +# } +# + +weight85<-1/rmse85.ensemble +ss<-which(is.na(rmse85.ensemble)) +if(length(ss)>0) weight85[ss]<-0 # if there are models with no runs we must put them to 0 weight +sum.weight85<-colSums(weight85) + +# finally, calculate the weighted ensemble: +ensemble.hist85.weighted<-lapply(seq_along(ensemble.runs.hist85[[1]]),function(i) (matrix((unlist(ensemble.runs.hist85[[1]][i])*weight85[1,i] + unlist(ensemble.runs.hist85[[2]][i])*weight85[2,i] + unlist(ensemble.runs.hist85[[3]][i])*weight85[3,i] + unlist(ensemble.runs.hist85[[4]][i])*weight85[4,i] + unlist(ensemble.runs.hist85[[5]][i])*weight85[5,i] + unlist(ensemble.runs.hist85[[6]][i])*weight85[6,i] + unlist(ensemble.runs.hist85[[7]][i])*weight85[7,i] + unlist(ensemble.runs.hist85[[8]][i])*weight85[8,i] + unlist(ensemble.runs.hist85[[9]][i])*weight85[9,i] + unlist(ensemble.runs.hist85[[10]][i])*weight85[10,i] + unlist(ensemble.runs.hist85[[11]][i])*weight85[11,i] + unlist(ensemble.runs.hist85[[12]][i])*weight85[12,i] + unlist(ensemble.runs.hist85[[13]][i])*weight85[13,i] + unlist(ensemble.runs.hist85[[14]][i])*weight85[14,i] + unlist(ensemble.runs.hist85[[15]][i])*weight85[15,i])/sum.weight85[i],n.lat.rean,n.lon.rean))) + + +# Taylor diagram of all historical runs for RCP 2.6: +# only check index 1 (Mean Annual Temp) and index 5 (Total Precipitation), use x11() to open a new window +for(my.index in 1:n.indices){ + png(filename=paste0(path.data,"/Ensemble/Taylor",suffix,"_RCP26_",my.index,"_",index.name0[my.index],".png"),width=500,height=500) + 
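# one normalized Taylor point is drawn per run: the first my.taylor() call below creates the
+  # diagram, the following calls (add=TRUE) only add points, with all runs of a model sharing the same colour.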
+  my.obs<-as.vector(index.rean[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom])
+  first.point=0
+  for(mod in 1:n.max.models){
+    for(run in 1:n.max.runs){
+      if(n.runs.rcp26[run,mod] != 0){
+        #print(paste(mod,run,cor(as.vector(index.rean[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]),as.vector(run.hist[[mod]][[run]][[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]))))
+
+        my.pred<-as.vector(run.hist26[[mod]][[run]][[my.index]][my.lat.pos.zoom,my.lon.pos.zoom])
+        if(first.point==0) my.taylor(my.obs,my.pred,sd.arcs=1,normalize=TRUE,main=paste0(index.name.short[my.index]),pcex=.7,cex.axis=1,xlab="Variance Ratio",ylab="Variance Ratio",ngamma=6, gamma.col="darkseagreen", mar = c(4, 4, 2, 2), BIAS=FALSE, my.text="", text.cex=1, col=col.taylor[mod])
+        if(first.point==1) my.taylor(my.obs,my.pred,sd.arcs=1,normalize=TRUE,main=paste0(index.name.short[my.index]),pcex=.7,cex.axis=1,xlab="Variance Ratio",ylab="Variance Ratio",ngamma=6, gamma.col="darkseagreen", mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=1, col=col.taylor[mod])
+        #Sys.sleep(1)
+        first.point=1
+      }
+    }
+  }
+
+  ### add reanalysis point:
+  my.taylor(my.obs,my.obs,sd.arcs=1,normalize=TRUE,main=paste0("Historical Ensemble Index ",my.index),pcex=1.5,cex.axis=1,xlab="RMSE",ylab="Variance Ratio",ngamma=6,mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=.7, pch=13, col="black")
+
+  ### add ensemble point (the red *) to the Taylor's diagram:
+  my.taylor(my.obs,as.vector(ensemble.hist26[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]),sd.arcs=1,normalize=TRUE,main=paste0("Historical Ensemble Index ",my.index),pcex=1,cex.axis=1,xlab="RMSE",ylab="Variance Ratio",ngamma=6,mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=.7, pch=8, col="red")
+
+  ### add to the Taylor's diagram the point of the weighted ensemble (brown *):
+  #my.taylor(my.obs,as.vector(ensemble.hist26.weighted[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]),sd.arcs=1,normalize=TRUE,main=paste0("Historical Ensemble Index ",my.index),pcex=1,cex.axis=1,xlab="RMSE",ylab="Variance Ratio",ngamma=6,mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=.7, pch=8, col="brown")
+
+  dev.off()
+
+} # close for on my.index
+
+
+# add legend of the Taylor's diagram in another plot:
+png(filename=paste0(path.data,"/Ensemble/Taylor",suffix,"_legend_RCP26.png"),width=300,height=500)
+ legend.labels<-legend.colors<-c()
+ for(mod in 1:n.max.models){
+   for(run in 1:n.max.runs){
+     if(n.runs.rcp26[run,mod] != 0){
+       legend.labels<-c(legend.labels,paste0("Modelo ",model.names[mod]," Run #",n.runs.rcp26[run,mod]))
+       legend.colors<-c(legend.colors,col.taylor[mod])
+     }
+   }
+ }
+
+ #legend.labels<-c(legend.labels,"Multimodel","Weighted Multimodel")
+ legend.labels<-c(legend.labels,"Multimodelo","Multimodelo ponderado","Observaciones (ERA-Interim)")
+ legend.colors<-c(legend.colors,"red","blue","black")
+
+ par(oma=c(0,0,0,0))
+ plot(1, type = "n", axes = FALSE, ann = FALSE); # title("Legend")
+ legend("top", legend = legend.labels, col = legend.colors, ncol = 1, cex = .9, lwd = NA, pch=c(rep(20, sum(ntot.runs.rcp26,na.rm=T)),8,8,13), text.font = 1, text.col = legend.colors)
+dev.off()
+
+#save(ensemble.rcp26,file=paste0(path.data,"/Ensemble/","Ensemble_Rcp26.RData"))
+#save(ensemble.hist26,file=paste0(path.data,"/Ensemble/","Ensemble_Historical26.RData"))
+#save(ensemble.hist,file=paste0(path.data,"/Ensemble/","Ensemble_Historical.RData"))
+#rm(ensemble.hist26,ensemble.hist26.weighted,ensemble.runs.hist26)
+
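+## note: the weighted-ensemble my.taylor() call is commented out above, so the legend entry
+## "Multimodelo ponderado" (blue) has no corresponding point in the diagrams unless it is re-enabled.
+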
+# Taylor diagram of all historical runs for RCP 8.5:
+# only check index 1 (Mean Annual Temp) and index 5 (Total Precipitation), use x11() to open a new window
+for(my.index in 1:n.indices){
+  my.obs<-as.vector(index.rean[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom])
+  png(filename=paste0(path.data,"/Ensemble/Taylor",suffix,"_RCP85_",my.index,"_",index.name0[my.index],".png"),width=500,height=500)
+  first.point=0
+  for(mod in 1:n.max.models){
+    for(run in 1:n.max.runs){
+      if(n.runs.rcp85[run,mod] != 0){
+        #print(paste(mod,run,cor(as.vector(index.rean[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]),as.vector(run.hist[[mod]][[run]][[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]))))
+
+        my.pred<-as.vector(run.hist85[[mod]][[run]][[my.index]][my.lat.pos.zoom,my.lon.pos.zoom])
+        if(first.point==0) my.taylor(my.obs,my.pred,sd.arcs=1,normalize=TRUE,main=paste0(index.name.short[my.index]),pcex=.6,cex.axis=1,xlab="Variance Ratio",ylab="Variance Ratio",ngamma=6, gamma.col="darkseagreen", mar = c(4, 4, 2, 2), BIAS=FALSE, my.text="", text.cex=1, col=col.taylor[mod])
+        if(first.point==1) my.taylor(my.obs,my.pred,sd.arcs=1,normalize=TRUE,main=paste0(index.name.short[my.index]),pcex=.6,cex.axis=1,xlab="Variance Ratio",ylab="Variance Ratio",ngamma=6, gamma.col="darkseagreen", mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=1, col=col.taylor[mod])
+        #Sys.sleep(1)
+        first.point=1
+      }
+    }
+  }
+
+
+  ### add reanalysis point:
+  my.taylor(my.obs,my.obs,sd.arcs=1,normalize=TRUE,main=paste0("Historical Ensemble Index ",my.index),pcex=1.5,cex.axis=1,xlab="RMSE",ylab="Variance Ratio",ngamma=6,mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=.7, pch=13, col="black")
+
+  ### add ensemble point (the red *) to the Taylor's diagram:
+  my.taylor(my.obs,as.vector(ensemble.hist85[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]),sd.arcs=1,normalize=TRUE,main=paste0("Historical Ensemble Index ",my.index),pcex=1,cex.axis=1,xlab="RMSE",ylab="Variance Ratio",ngamma=6,mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=.7, pch=8, col="red")
+
+  ### add to the Taylor's diagram the point of the weighted ensemble (the blue *):
+  #my.taylor(my.obs,as.vector(ensemble.hist85.weighted[[my.index]][my.lat.pos.zoom,my.lon.pos.zoom]),sd.arcs=1,normalize=TRUE,main=paste0("Historical Ensemble Index ",my.index),pcex=1,cex.axis=1,xlab="RMSE",ylab="Variance Ratio",ngamma=6,mar = c(4, 4, 2, 2), add=TRUE, BIAS=FALSE, my.text="", text.cex=.7, pch=8, col="blue")
+
+  dev.off()
+
+} # close for on my.index
+
+
+# add legend of the Taylor's diagram in another plot:
+png(filename=paste0(path.data,"/Ensemble/Taylor",suffix,"_legend_RCP85.png"),width=300,height=500)
+ legend.labels<-legend.colors<-c()
+ for(mod in 1:n.max.models){
+   for(run in 1:n.max.runs){
+     if(n.runs.rcp85[run,mod] != 0){
+       legend.labels<-c(legend.labels,paste0("Modelo ",model.names[mod]," Run #",n.runs.rcp85[run,mod]))
+       legend.colors<-c(legend.colors,col.taylor[mod])
+     }
+   }
+ }
+
+ #legend.labels<-c(legend.labels,"Multimodel","Weighted Multimodel")
+ legend.labels<-c(legend.labels,"Multimodelo","Multimodelo ponderado","Observaciones (ERA-Interim)")
+ legend.colors<-c(legend.colors,"red","blue","black")
+
+ par(oma=c(0,0,0,0))
+ plot(1, type = "n", axes = FALSE, ann = FALSE); # title("Legend")
+ legend("top", legend = legend.labels, col = legend.colors, ncol = 1, cex = .9, lwd = NA, pch=c(rep(20, sum(ntot.runs.rcp85,na.rm=T)),8,8,13), text.font = 1, text.col = legend.colors)
+dev.off()
+
+
+############ ENSEMBLE FUTURE SCENARIO with the selected runs ######################################################################
+
+# Maps of the Ensemble of RCP 2.6:
+ for(my.index in 1:n.indices){
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP26_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   my.toptitle<-paste0(index.name[my.index]," (Ensemble RCP 2.6)")
+
+   PlotEquiMap2(ensemble.rcp26[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+ }
+
+ # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP26_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+   my.toptitle<-paste0(index.name.short[my.index]," (Ensemble RCP 2.6)")
+
+   # to solve some bugs of PlotEquiMap:
+   plot.data<-ensemble.rcp26[[my.index]][my.lat.pos,my.lon.pos]
+   #contour.data<-index.rcp26.interp[[my.index]][my.lon.pos,my.lat.pos]
+   layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE))
+   #layout.show(2)
+   par(oma=c(1,0,0,0.5))
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F)
+   # contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,my.subsamplegbar[[my.index]])
+   dev.off()
+ }
+
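+ ## note: map.cities() is only used as a convenient way to overlay arbitrary points: the parcelas
+ ## are passed as a fake cities data base (pop=0, capital=0, label=FALSE), so only the pch=3
+ ## crosses are drawn at their coordinates on the existing map.
+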
+ # the same charts as above, but plotting the difference between the rcp 2.6 and the historical data:
+ for(my.index in 1:n.indices){
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP26_tendency_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   my.toptitle<-paste0(index.name[my.index]," (Ensemble RCP 2.6 - Historical)")
+   plot.data<-ensemble.rcp26[[my.index]]-ensemble.hist26[[my.index]]
+
+   PlotEquiMap2(plot.data,lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+   ColorBar(my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+ }
+
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP26_tendency_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+   my.toptitle<-paste0(index.name.short[my.index]," (RCP 2.6 - Historical)")
+
+   # to solve some bugs of PlotEquiMap:
+   plot.data<-ensemble.rcp26[[my.index]][my.lat.pos,my.lon.pos]-ensemble.hist26[[my.index]][my.lat.pos,my.lon.pos]
+
+   layout(matrix(c(1,1,1,1,2), 1, 5, byrow = TRUE))
+   par(oma=c(1,0,0,0.2))
+   PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F)
+
+   my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F)
+   map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas
+   ColorBar(my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),vert=TRUE,cex=1.3)
+
+   dev.off()
+ }
+
+
+# Maps of the Ensemble of RCP 8.5:
+ for(my.index in 1:n.indices){
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP85_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480)
+
+   layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE))
+   #layout.show(2)
+   my.toptitle<-paste0(index.name[my.index]," (Ensemble RCP 8.5)")
+
+   PlotEquiMap2(ensemble.rcp85[[my.index]],lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6,
+     brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),
+     filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],
+     contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F)
+
+   ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=F,cex=.8)
+   dev.off()
+ }
+
+ # the same charts as above, but only for the Big Patagonia and plotting the location of the four parcelas:
+ my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin)
+ my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin)
+ my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels
+ my.lon<-lon.rean[my.lon.pos]
+
+ for(my.index in 1:n.indices){
+   png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP85_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500)
+
+   my.toptitle<-paste0(index.name.short[my.index]," (Ensemble RCP 8.5)")
+
+   # to solve some bugs of PlotEquiMap:
+   plot.data<-ensemble.rcp85[[my.index]][my.lat.pos,my.lon.pos]
+ 
#contour.data<-index.rcp85.interp[[my.index]][my.lon.pos,my.lat.pos] + layout(matrix(c(1,1,1,1,1,2), 1, 6, byrow = TRUE)) + #layout.show(2) + par(oma=c(1,0,0,0.5)) + #par(mfrow=c(2,1)) + #x11(width=4,height=.8) + PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6, + brks=my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1), + filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index], + contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],intylat=1,intxlon=1,drawleg=F) + # contours=NULL,intylat=1,intxlon=1,drawleg=F) + + my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F) + map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas + ColorBar(my.brks[[my.index]],cols=colorRampPalette(my.palette[[my.index]])(length(my.brks[[my.index]])-1),vert=TRUE,cex=1.3,my.subsamplegbar[[my.index]]) + dev.off() + } + + + # the same charts as above, but plotting the difference between the rcp 8.5 and the historical data: + for(my.index in 1:n.indices){ + png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP85_tendency_Worldmap_Indice_",my.index,"_",index.name0[my.index],".png"),width=800,height=480) + + layout(matrix(c(1,1,1,1,1,1,1,1,2), 9, 1, byrow = TRUE)) + #layout.show(2) + my.toptitle<-paste0(index.name[my.index]," (Ensemble RCP 8.5 - Historical)") + plot.data<-ensemble.rcp85[[my.index]]-ensemble.hist85[[my.index]] + + PlotEquiMap2(plot.data,lon.rean,lat.rean,toptitle=my.toptitle,sizetit = 0.6, + brks=my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1), + filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index], + contours=list(NULL,NULL,NULL,NULL,NULL,NULL)[[my.index]],brks2=my.brks.dos[[my.index]],contours.col=my.contours.col[[my.index]],drawleg=F) + + ColorBar(my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),vert=F,cex=.8) + dev.off() + } + + + my.lat.pos<-which(lat.rean < bigpata.latmax & lat.rean > bigpata.latmin) + my.lon.pos<-which(lon.rean < bigpata.lonmax & lon.rean > bigpata.lonmin) + my.lat<-lat.rean[my.lat.pos] # select only Patagonia pixels + my.lon<-lon.rean[my.lon.pos] + + for(my.index in 1:n.indices){ + png(filename=paste0(path.data,"/Ensemble/Ensemble_RCP85_tendency_BigPatagonia_Indice_",my.index,"_",index.name0[my.index],".png"),width=500,height=1500) + + my.toptitle<-paste0(index.name.short[my.index]," (Ensemble RCP 8.5 - Historical)") + + # to solve some bugs of PlotEquiMap: + plot.data<-ensemble.rcp85[[my.index]][my.lat.pos,my.lon.pos]-ensemble.hist85[[my.index]][my.lat.pos,my.lon.pos] + + layout(matrix(c(1,1,1,1,2), 1, 5, byrow = TRUE)) + par(oma=c(1,0,0,0.2)) + PlotEquiMap2(plot.data,my.lon,my.lat,toptitle=my.toptitle,sizetit = 0.6, + brks=my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1), + filled.continents=F,units=my.units[my.index],subsampleg=my.subsampleg[my.index],contours=NULL,intylat=1,intxlon=1,drawleg=F) + + my.parcelas<-data.frame(name=parcelas.names,lat=parcelas.lat,long=parcelas.lon,pop=0,capital=0,stringsAsFactors=F) + map.cities(my.parcelas,pch=3,cex=3,col=c("purple","blue","red","black"),label=FALSE) # add the 4 points of the 4 parcelas + 
+   ColorBar(my.brks.trend[[my.index]],cols=colorRampPalette(my.palette.trend[[my.index]])(length(my.brks.trend[[my.index]])-1),vert=TRUE,cex=1.3)
+
+   dev.off()
+ }
+
+
+
+
+# plot the yearly time series of each index:
+
+ind=1 # index to plot
+par1=1 # parcela whose observed (reanalysis) series is drawn as the black line
+
+#png(filename=paste0(path.data,"/Ensemble/Time_series_",ind,"_",index.name0[ind],".png"),width=1000,height=480)
+
+ensemble.obs.trans.rcp26.yearly.parcelas<-matrix(NA,n.years.tot,n.parcelas)
+ensemble.obs.trans.rcp85.yearly.parcelas<-matrix(NA,n.years.tot,n.parcelas)
+
+index.rean.year.full<-as.numeric(c(index.rean.year[ind,,par1],rep(NA,n.years.trans+n.years.rcp)))
+
+for(par in 1:n.parcelas) ensemble.obs.trans.rcp26.yearly.parcelas[,par]<-c(rep(NA,n.years.hist),ensemble.trans26.yearly.parcelas[[ind]][,par],ensemble.rcp26.yearly.parcelas[[ind]][,par])
+for(par in 1:n.parcelas) ensemble.obs.trans.rcp85.yearly.parcelas[,par]<-c(rep(NA,n.years.hist),ensemble.trans85.yearly.parcelas[[ind]][,par],ensemble.rcp85.yearly.parcelas[[ind]][,par])
+
+yrange<-range(c(index.rean.year.full[!is.na(index.rean.year.full)],ensemble.obs.trans.rcp26.yearly.parcelas[!is.na(ensemble.obs.trans.rcp26.yearly.parcelas)],ensemble.obs.trans.rcp85.yearly.parcelas[!is.na(ensemble.obs.trans.rcp85.yearly.parcelas)]))
+
+par(mar=c(5,8,1,2)+0.1,mgp=c(5,1,0))
+plot(1985:2050,index.rean.year.full,xlab="Año",ylab=index.name.short[ind],cex.lab=2,cex.axis=1.5,las=1,ylim=yrange,col="black",type="l",xaxp=c(1985,2050,13))
+
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,1],type="l",col="red",lty=2)
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,2],type="l",col="blue",lty=2)
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,3],type="l",col="brown",lty=2)
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,4],type="l",col="purple",lty=2)
+
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,1],type="l",col="red")
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,2],type="l",col="blue")
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,3],type="l",col="brown")
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,4],type="l",col="purple")
+
+#dev.off()
+
+
+
+# the same but without adding the ERA-Interim data (it is replaced by the historical data)
+ensemble.all26.yearly.parcelas<-matrix(NA,n.years.tot,n.parcelas)
+ensemble.all85.yearly.parcelas<-matrix(NA,n.years.tot,n.parcelas)
+
+for(par in 1:n.parcelas) ensemble.all26.yearly.parcelas[,par]<-c(ensemble.hist26.yearly.parcelas[[ind]][,par],ensemble.trans26.yearly.parcelas[[ind]][,par],ensemble.rcp26.yearly.parcelas[[ind]][,par])
+for(par in 1:n.parcelas) ensemble.all85.yearly.parcelas[,par]<-c(ensemble.hist85.yearly.parcelas[[ind]][,par],ensemble.trans85.yearly.parcelas[[ind]][,par],ensemble.rcp85.yearly.parcelas[[ind]][,par])
+
+yrange<-range(c(ensemble.all26.yearly.parcelas,ensemble.all85.yearly.parcelas))
+
+plot(1985:2050,1:66,type="n",xlab="Año",ylab=index.name.short[ind],cex.lab=2,cex.axis=1.5,las=1,xaxp=c(1985,2050,13),ylim=yrange)
+
+lines(1985:2050,ensemble.all26.yearly.parcelas[,1],type="l",col="red",lty=2)
+lines(1985:2050,ensemble.all26.yearly.parcelas[,2],type="l",col="blue",lty=2)
+lines(1985:2050,ensemble.all26.yearly.parcelas[,3],type="l",col="black",lty=2)
+lines(1985:2050,ensemble.all26.yearly.parcelas[,4],type="l",col="purple",lty=2)
+
+lines(1985:2050,ensemble.all85.yearly.parcelas[,1],type="l",col="red")
+lines(1985:2050,ensemble.all85.yearly.parcelas[,2],type="l",col="blue")
+lines(1985:2050,ensemble.all85.yearly.parcelas[,3],type="l",col="black")
+lines(1985:2050,ensemble.all85.yearly.parcelas[,4],type="l",col="purple")
+
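+## note: the two blocks above are quick looks drawn on the active graphics device (their png()
+## calls are commented out); the loop below writes the final per-index time-series figures to disk.
+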
+
+# plot the yearly time series of each index, with the delta rcp - hist appended after the observed data:
+
+for (ind in 1:n.indices){
+
+ensemble.obs.trans.rcp26.yearly.parcelas<-matrix(NA,n.years.tot,n.parcelas)
+ensemble.obs.trans.rcp85.yearly.parcelas<-matrix(NA,n.years.tot,n.parcelas)
+
+index.rean.year.full<-as.numeric(c(index.rean.year[ind,,],rep(NA,n.years.trans+n.years.rcp)))
+
+for(par in 1:n.parcelas) ensemble.obs.trans.rcp26.yearly.parcelas[,par]<-c(rep(NA,n.years.hist),c(ensemble.trans26.yearly.parcelas[[ind]][,par],ensemble.rcp26.yearly.parcelas[[ind]][,par]) - ensemble.hist26.parcelas[[ind]][par] + parcelas.indices.rean[par,ind])
+
+for(par in 1:n.parcelas) ensemble.obs.trans.rcp85.yearly.parcelas[,par]<-c(rep(NA,n.years.hist),c(ensemble.trans85.yearly.parcelas[[ind]][,par],ensemble.rcp85.yearly.parcelas[[ind]][,par]) - ensemble.hist85.parcelas[[ind]][par] + parcelas.indices.rean[par,ind])
+
+yrange<-range(c(index.rean.year.full[!is.na(index.rean.year.full)],ensemble.obs.trans.rcp26.yearly.parcelas[!is.na(ensemble.obs.trans.rcp26.yearly.parcelas)],ensemble.obs.trans.rcp85.yearly.parcelas[!is.na(ensemble.obs.trans.rcp85.yearly.parcelas)]))
+
+if(ind<5) {my.legend='bottomright'} else {my.legend='topright'}
+
+png(filename=paste0(path.data,"/Ensemble/Time_series_RCP26_",ind,"_",index.name0[ind],".png"),width=1000,height=400)
+
+par(mar=c(6,8,1,2)+0.1,mgp=c(5,1,0))
+plot(1985:2050,1:66,xlab="Año",ylab=index.name.short[ind],cex.lab=2,cex.axis=1.5,las=1,ylim=yrange,type="n",xaxp=c(1985,2050,13))
+
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,1],rep(NA,n.years.trans+n.years.rcp))),type="l",col="red",lty=1)
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,2],rep(NA,n.years.trans+n.years.rcp))),type="l",col="blue",lty=1)
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,3],rep(NA,n.years.trans+n.years.rcp))),type="l",col="darkgreen",lty=1)
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,4],rep(NA,n.years.trans+n.years.rcp))),type="l",col="black",lty=1)
+
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,1],type="l",col="red",lty=1)
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,2],type="l",col="blue",lty=1)
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,3],type="l",col="darkgreen",lty=1)
+lines(1985:2050,ensemble.obs.trans.rcp26.yearly.parcelas[,4],type="l",col="black",lty=1)
+
+legend(my.legend, parcelas.names, lty=1, col=c('red','blue','darkgreen','black'), bty='n', cex=1)
+
+dev.off()
+
+# the same, but for the RCP 8.5:
+
+png(filename=paste0(path.data,"/Ensemble/Time_series_RCP85_",ind,"_",index.name0[ind],".png"),width=1000,height=400)
+
+par(mar=c(6,8,1,2)+0.1,mgp=c(5,1,0))
+plot(1985:2050,1:66,xlab="Año",ylab=index.name.short[ind],cex.lab=2,cex.axis=1.5,las=1,ylim=yrange,type="n",xaxp=c(1985,2050,13))
+
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,1],rep(NA,n.years.trans+n.years.rcp))),type="l",col="red",lty=1)
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,2],rep(NA,n.years.trans+n.years.rcp))),type="l",col="blue",lty=1)
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,3],rep(NA,n.years.trans+n.years.rcp))),type="l",col="darkgreen",lty=1)
+lines(1985:2050,as.numeric(c(index.rean.year[ind,,4],rep(NA,n.years.trans+n.years.rcp))),type="l",col="black",lty=1)
+
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,1],type="l",col="red")
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,2],type="l",col="blue")
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,3],type="l",col="darkgreen")
+lines(1985:2050,ensemble.obs.trans.rcp85.yearly.parcelas[,4],type="l",col="black")
+legend(my.legend, parcelas.names, lty=1, col=c('red','blue','darkgreen','black'), bty='n', cex=1)
+
+dev.off()
+
+} # close loop on ind
+
+
+save.image(file=paste0(path.data,"/Ensemble/Bodega_base_ensemble.RData"))
+
+} # close if on ensemble
+
diff --git a/old/backup/README.docx b/old/backup/README.docx
new file mode 100644
index 0000000000000000000000000000000000000000..ed25ab7bca444c22ae1cc3db13bc312f3acf548d
Binary files /dev/null and b/old/backup/README.docx differ
diff --git a/old/backup/README.md b/old/backup/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a077f2ad186c94432c7c346bbbd66f7d79609d4a
--- /dev/null
+++ b/old/backup/README.md
@@ -0,0 +1,12 @@
+FILENAME AUTHOR SHORT DESCRIPTION
+
+/general/fig2catalog.sh ncortesi formatting images for the ESS catalogue
+/general/grid2contour.R ncortesi display contour lines excluding small areas
+/general/taylor.R ncortesi Taylor diagram as in package plotrix with more display options
+/general/checking_ncdata.sh ncortesi quick quality control of dimensions and variables of NetCDF files
+/general/lmfitfast.R ncortesi linear regression 10x faster
+/general/parallel.R ncortesi run multiple jobs on SLURM
+/general/diagnostic.R ncortesi example of how to split a diagnostic on multiple jobs on SLURM
+/skill_assessment/subskill.R ncortesi validate any subseasonal system with any skill score
+/skill_assessment/subskillmaps.R ncortesi visualize skill scores & reliability diagrams generated with previous script
+/multimodels/cmip5_multimodel_4torres ncortesi ensemble of CMIP5 models to estimate climate change for a set of wine indices
diff --git a/old/backup/README.txt b/old/backup/README.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d398b81c295d5c45304f5c3ed1c917e56937d405
--- /dev/null
+++ b/old/backup/README.txt
@@ -0,0 +1,30 @@
+Nicola's ESS Group files:
+
+FILENAME SHORT DESCRIPTION
+
+/general:
+ fig2catalog.sh formatting images for the ESS catalogue
+ checking_ncdata.sh QC of NetCDF files
+ grid2contour.R display contour lines excluding small areas
+ lmfitfast.R linear regression 10x faster
+ taylor.R Taylor diagram with more display options
+ parallel.R run multiple jobs on SLURM
+ diagnostic.R example of running multiple jobs on SLURM
+
+ removed:
+ ColorBarCustom.R obsolete, use Vero's function instead
+ PlotEquiMap_colored.R implemented in new version of s2dverification
+
+/skill_assessment: (remove and create a common project with Andrea?)
+ subskill.R validate any subseasonal system with any skill score
+ subskillmaps.R visualize skill scores & rel. diagrams generated with previous script
+
+/weather_regimes: (add all scripts in /shared/earth/Operational/EDPR ?)
+ weather_regimes_EDPR.R classify and plot weather regimes for EDPR
+
+ removed:
+ weather_regimes.R classify weather_regimes
+ weather_regimes_maps.R plot weather_regimes
+
+/multimodels: <- move this folder to my personal repository?
+ cmip5_multimodel_4torres ensemble of CMIP5 models to estimate climate change for a set of wine indices
diff --git a/old/backup/SubSkill.R b/old/backup/SubSkill.R
new file mode 100644
index 0000000000000000000000000000000000000000..6c3c9de661be93bdff857039b48fe630389d7542
--- /dev/null
+++ b/old/backup/SubSkill.R
@@ -0,0 +1,520 @@
+#########################################################################################
+#                           Sub-seasonal Skill Scores                                   #
+#########################################################################################
+
+# Creation: 02/2016
+# Author: Nicola Cortesi
+# Aim: validate subseasonal forecast systems
+# I/O: input monthly hindcasts formatted as in /esnas,
+#      output: .png with the desired skill score
+# Assumptions: it relies on Load() to import data
+# Branch: skill_assessment
+
+# you can run it from the terminal with the syntax:
+#
+# Rscript SubSkill.R
+#
+# i.e: to run only the chunk number 3, write:
+#
+# Rscript SubSkill.R 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks sequentially.
+# Use this last approach only if computational speed is not a problem.
+
+rm(list=ls())
+
+##########################################################################################
+#                         Load functions and libraries                                   #
+##########################################################################################
+
+#load libraries and functions:
+library(SpecsVerification) # for skill scores
+library(s2dverification)   # for Load() function
+library(easyVerification)  # for veriApply() function
+library(abind)             # for merging arrays
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+#source('~/GIT/s2dverification/R/Utils.R')
+
+##########################################################################################
+#                             MareNostrum settings                                       #
+##########################################################################################
+
+args <- commandArgs(TRUE) # put into variable args the list of all the optional arguments with which the script was run from the terminal
+
+if(length(args) == 0) { print("Running the script in a sequential way"); nloadcores <- 8 }
+if(length(args) == 1) { print("Running the script in a parallel way"); nloadcores <- 1 }
+if(length(args) > 1) stop("Only one argument is required")
+
+chunk <- ifelse(length(args) == 1, TRUE, FALSE) # set variable 'chunk' to TRUE if we are running the script from the command line with 1 argument, FALSE otherwise
+
+if(chunk) chunk.month <- as.integer(args[1]) # number of the month to run in this script (if chunk == TRUE)
+
+##########################################################################################
+#                                User's settings                                         #
+##########################################################################################
+
+# working dir where to store the output maps and files in the /Data and /Maps subdirs:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional"
+
+# path of the weekly reanalysis data of the variable 'var' to study:
+#rean.dir <- '/esarchive/old-files/recon_ecmwf_erainterim/weekly_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+rean.dir <- '/esnas/recon/ecmwf/erainterim/weekly_mean/$VAR_NAME$-ecmwf_f6h/$VAR_NAME$_$START_DATE$.nc'
+rean.name <- "ERA-Interim"
+
+# path of the monthly forecast system files:
+forecast.year <- 2016 #2014 # starting year of the weekly sequence of the forecasts
+#forecast.dir <- 
+#forecast.dir <- paste0('/esnas/exp/ECMWF/monthly/ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')
+#forecast.dir <- paste0('/esnas/exp/ecmwf/monthly_ensforhc/weekly_mean/$VAR_NAME$_f6h/',forecast.year,'$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')
+forecast.dir <- paste0('/esnas/exp/ecmwf/s2s-monthly_ensforhc/weekly_mean/$VAR_NAME$_f24h/',forecast.year,'$MONTH$$DAY$/$VAR_NAME$_$START_DATE$.nc')
+cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for predictions (to be used in map titles)
+
+var.name <- 'sfcWind' #'tas' # forecast variable to verify. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- "10-m wind speed" #'2m Temperature' #'10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+mes <- 1 # starting forecast month (1: January, 2: February, etc.)
+day <- 4 #2 # starting forecast day
+
+# choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr:
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr')
+
+# number of resamples considered in the bootstrapping of the FairRPSS and the FairCRPSS (should be AT LEAST 1000, better 5000-10000):
+n.boot <- 5000
+
+yr1.hind <- 1996 #1994 # first hindcast year
+yr2.hind <- 2015 #2013 # last hindcast year (usually the forecast year - 1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 11 #4 # number of hindcast members
+
+veri.month <- 2 #1:12 # select the month(s) for which to compute the chosen skill scores
+
+rean.int <- TRUE # if TRUE, interpolate the reanalysis to the same resolution of the model; if FALSE, interpolate the model to the same res. of the reanalysis
+
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+#conf.level <- c(0.025, 0.975) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+
+#int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+#int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+#my.lat <- 55.3197120
+#my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+#col <- ifelse(!mare, as.character(read.csv("/scratch/Earth/ncortesi/RESILIENCE/rgbhex.csv",header=F)[,1]), as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1]))
+#col<-as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]) # blue-yellow-red colors
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+sdates.seq <- weekly.seq(forecast.year,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+pos.bis <- which(sdates.seq == paste0(forecast.year,"0229")) # find if there is a startdate on the 29th of February and return its position in the vector sdates.seq
+if(length(pos.bis) != 0) sdates.seq <- sdates.seq[-pos.bis] # if there is a startdate on the 29th of February, remove it
+
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+#my.month.short <- substr(my.month,1,3)
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData and comment these rows to use function load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#                      obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#                      nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+#load(paste0(workdir,'/coordinates.RData'))
+#n.lon <- length(lons)
+#n.lat <- length(lats)
+#n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0]))
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+# load only 1 year of weekly var rean data from reanalysis to get lat and lon:
+if(!rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)),
+                                obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)),
+                                nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+if(rean.int) data.rean <- Load(var = var.name, exp = list(list(path=forecast.dir)),
+                               obs = NULL, sdates=paste0(yr1.hind,substr(sdates.seq[1],5,6),substr(sdates.seq[1],7,8)),
+                               nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+lons <- data.rean$lon
+lats <- data.rean$lat
+n.lon <- length(lons)
+n.lat <- length(lats)
+my.grid<-paste0('r',n.lon,'x',n.lat)
+
+#file_path='/home/Earth/ngonzal2/R/ConfFiles/IC3.conf' # Load() file path
+#file_path <- '/home/Earth/ncortesi/Downloads/RESILIENCE/IC3.conf'
+
+#########################################################################################
+#      Calculate and save up to all the chosen Skill Scores for a selected period       #
+#########################################################################################
+# if we run the script from the terminal with an argument, it only computes the month specified by that argument and saves the results for that month
+if(chunk) veri.month <- chunk.month
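+
+# e.g., a possible way to launch all 12 monthly chunks in parallel from bash (untested sketch, assuming 12 processors are available):
+#
+# for m in {1..12}; do Rscript SubSkill.R $m & done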
+
+for(month in veri.month){
+  #month=1 # for the debug
+
+  # select the startdates (weeks) you want to compute:
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5)
+
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  my.FairRpss <- my.FairCrpss <- my.EnsCorr <- array(NA,c(n.leadtimes, n.lat,n.lon))
+  #my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+  my.FairRpss.pvalue <- my.FairCrpss.pvalue <- my.EnsCorr.pvalue <- array(NA, c(n.leadtimes, n.lat, n.lon))
+
+  anom.rean.chunk <- anom.hindcast.mean.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.rean.clim <- array(NA, c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.hindcast.chunk <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  #my.interv<-(1 + sub.size*(s-1)):((sub.size*s) + add.last) # longitude interval where to load data
+
+  for(startdate in my.startdates){
+    pos.startdate <- which(startdate == my.startdates) # count of the number of startdates already loaded + 1
+    my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    print(paste0('Computing reanalysis anomalies for startdate ', which(startdate==my.startdates),'/',length(my.startdates)))
+
+    # load weekly var rean data in: /esnas/reconstructions/ecmwf/eraint/weekly_mean
+    if(!rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)),
+                                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                                    nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=nloadcores)
+
+    if(rean.int) data.rean <- Load(var = var.name, exp = list(list(path=rean.dir)),
+                                   obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                                   nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=nloadcores)
+
+    # dim(data.rean$mod) # [1,1,20,4,320,640]
+
+    # Mean of the 20 years (for each point and leadtime):
+    clim.rean <- apply(data.rean$mod,c(1,2,4,5,6),mean)[1,1,,,] # average of all years for each leadtime and pixel
+    clim.rean <- InsertDim(InsertDim(InsertDim(clim.rean,1,n.yrs.hind),1,1),1,1) # repeat the same values n.yrs.hind times to be able to calculate the anomalies #[1,1,20,4,320,640]
+    anom.rean <- data.rean$mod - clim.rean
+    #anom.rean<-InsertDim(anom.rean[1,1,,,,],1,1) # [1,20,4,320,640]
+
+    rm(data.rean, clim.rean)
+    gc()
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    #anom.rean <- drop(anom.rean)
+    anom.rean.chunk[my.time.interv,,,] <- anom.rean
+
+    # Load hindcast data:
+    print(paste0('Computing forecast anomalies for startdate ', which(startdate == my.startdates),'/',length(my.startdates)))
+
+    # Load weekly var subseasonal data in: /esnas/exp/ECMWF/monthly/ensforhc/weekly_mean and interpolate them to the same ERA-Interim resolution (512x256)
+    if(rean.int) data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)),
+                                       obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)),
+                                       nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', nprocs=nloadcores)
+    if(!rean.int) data.hindcast <- Load(var = var.name, exp = list(list(path=forecast.dir)),
+                                        obs = NULL, sdates = paste0(yr1.hind:yr2.hind, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)),
+                                        nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=nloadcores)
+
+    # dim(data.hindcast$mod) # [1,4,20,4,320,640]
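+
+    # The climatology below is just the mean over members and years for each leadtime and grid point;
+    # a minimal commented sketch of the same anomaly computation on a toy array [member, year, leadtime]
+    # (illustrative names only):
+    # x <- array(rnorm(2*5*3), c(2,5,3))
+    # clim <- apply(x, 3, mean) # climatology per leadtime, pooling members and years
+    # anom <- sweep(x, 3, clim) # subtract it from every member and year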
+
+    # Mean over the hindcast members and the years (for each point and leadtime):
+    clim.hindcast <- apply(data.hindcast$mod,c(1,4,5,6),mean)[1,,,] # average of all years and members for each leadtime and pixel
+    clim.hindcast <- InsertDim(InsertDim(InsertDim(clim.hindcast,1,n.yrs.hind),1,n.members),1,1) # repeat the same values n.yrs.hind and n.members times to calc. the anomalies
+    anom.hindcast <- data.hindcast$mod - clim.hindcast
+
+    rm(data.hindcast,clim.hindcast)
+    gc()
+
+    # average the anomalies over the hindcast members to compute the ensemble mean [1,20,4,320,640]
+    anom.hindcast.mean<-apply(anom.hindcast,c(1,3,4,5,6),mean)
+
+    gc()
+
+    #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+    anom.hindcast <- drop(anom.hindcast)
+    anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast
+
+    rm(anom.hindcast, anom.rean)
+    gc()
+
+    anom.hindcast.mean <- drop(anom.hindcast.mean)
+    anom.hindcast.mean.chunk[my.time.interv,,,] <- anom.hindcast.mean
+
+    rm(anom.hindcast.mean)
+    gc()
+
+  } # end load data and conversion to anomalies
+
+
+  if(any(my.score.name == "FairRpss")){
+    #my.FairRpss.sub <- veriApply("FairRpss", fcst=anom.hindcast.sub, obs=anom.rean.sub, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+    my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+    #my.FairRps.chunk.double <- veriApply("FairRps", fcst=anom.hindcast.chunk.double, obs=anom.rean.chunk.double, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+    my.FairRps.chunk.sum <- apply(my.FairRps.chunk, c(2,3,4), sum)
+
+    # the probabilities for the climatological ensemble can be set without using convert2prob, and they are the same for all points and leadtimes (33% for each tercile):
+    ens.ref <- array(33,c(n.startdates*n.yrs.hind,3))
+
+    # the observed probabilities vary from point to point and for each leadtime: [obs] = [3*n.years*n.startdates, n.leadtimes, lat, lon]
+    # ('prob' must be passed as a named argument: a positional 'prob <- my.prob' would be matched to 'threshold' instead)
+    obs <- apply(anom.rean.chunk, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+
+    # reshape obs to be able to apply EnsRps(ens, obs) below:
+    obs2 <- InsertDim(obs,1,3)
+    obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+    obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+    obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F] # drop=F is used not to lose the dimension(s) with only 1 element
+
+    my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens.ref,t(x), format="members") })
+    my.Rps.clim.chunk.sum <- apply(my.Rps.clim.chunk, c(2,3,4), sum)
+
+    my.FairRpss.chunk <- 1-(my.FairRps.chunk.sum / my.Rps.clim.chunk.sum)
+
+    rm(ens.ref, obs, obs2, obs3, my.FairRps.chunk.sum, my.Rps.clim.chunk.sum)
+    gc()
+
+    ########################### bootstrapping ##############################
+    my.FairRpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) # define it only now, because for n.boot=1000 it is already a 2 GB object!!!
+
+    for(b in 1:n.boot){
+      cat(paste0("Bootstrapping FairRpss: resampling n. ",b,"/",n.boot,"\n"))
+      yrs.sampled <- sample(1:n.yrs, replace=TRUE) # resample the years with replacement
+
+      #for(y in 1:n.yrs) anom.rean.chunk.sampled[y,,,] <- anom.rean.chunk[yrs.sampled[y],,,]
+      #for(y in 1:n.yrs) anom.hindcast.chunk.sampled[,y,,,] <- anom.hindcast.chunk[,yrs.sampled[y],,,]
+
+      my.FairRps.sampled <- my.FairRps.chunk
+      my.Rps.clim.sampled <- my.Rps.clim.chunk
+
+      for(y in 1:n.yrs) my.FairRps.sampled[y,,,] <- my.FairRps.chunk[yrs.sampled[y],,,]
+      for(y in 1:n.yrs) my.Rps.clim.sampled[y,,,] <- my.Rps.clim.chunk[yrs.sampled[y],,,]
+
+      #my.FairRps.chunk <- veriApply("FairRps", fcst=anom.hindcast.chunk.sampled, obs=anom.rean.chunk.sampled, prob=my.prob, tdim=2, ensdim=1, parallel=FALSE, ncpus=n.cpus)
+      #ens <- array(33,c(n.startdates*n.yrs.hind,3))
+      #obs <- apply(anom.rean.chunk.sampled, c(2,3,4), function(x){convert2prob(x, prob = my.prob)})
+      #obs2 <- InsertDim(obs,1,3)
+      #obs2[2,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+      #obs2[3,1:(n.startdates*n.yrs.hind),,,] <- obs2[1,(2*n.startdates*n.yrs.hind + 1:(n.startdates*n.yrs.hind)),,,]
+      #obs3 <- obs2[,1:(n.startdates*n.yrs.hind),,,,drop=F]
+      #my.Rps.clim.chunk <- apply(obs3,c(3,4,5), function(x){ EnsRps(ens,t(x),format="members") })
+
+      my.FairRps.sampled.sum <- apply(my.FairRps.sampled,c(2,3,4), sum)
+      my.Rps.clim.sampled.sum <- apply(my.Rps.clim.sampled,c(2,3,4), sum)
+      my.FairRpss.sampled <- 1-(my.FairRps.sampled.sum/my.Rps.clim.sampled.sum)
+
+      my.FairRpssBoot[b,,,] <- my.FairRpss.sampled
+
+      gc()
+    }
+
+    rm(my.FairRps.sampled, my.Rps.clim.sampled, my.FairRps.sampled.sum, my.Rps.clim.sampled.sum, my.FairRpss.sampled, yrs.sampled)
+    gc()
+
+    my.FairRpssBootMean <- apply(my.FairRpssBoot,c(2,3,4),mean,na.rm=T)
+
+    my.test.value <- 0 # my.FairRpss.chunk # test if the bootstrapped mean value is significantly different from this value or not
+
+    my.FairRpss.diff1 <- my.FairRpssBootMean + abs(my.test.value - my.FairRpssBootMean)
+    my.FairRpss.diff2 <- my.FairRpssBootMean - abs(my.test.value - my.FairRpssBootMean)
+
+    for(i in 1:dim(my.FairRpssBoot)[2]){
+      for(j in 1:dim(my.FairRpssBoot)[3]){
+        for(k in 1:dim(my.FairRpssBoot)[4]){
+          extr1 <- which(my.FairRpssBoot[,i,j,k] > my.FairRpss.diff1[i,j,k])
+          extr2 <- which(my.FairRpssBoot[,i,j,k] < my.FairRpss.diff2[i,j,k])
+          if(my.test.value != 0) my.FairRpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot # two-tailed test
+          if(my.test.value == 0) my.FairRpss.pvalue[i,j,k] <- ifelse(my.FairRpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot)
+
+        }
+      }
+    }
+
+    rm(my.FairRpssBoot, my.FairRpssBootMean, my.FairRpss.diff1, my.FairRpss.diff2, extr1, extr2)
+    gc()
+
+    #which(my.FairRpss.pvalue > 0.05) # points where the FairRPSS is NOT significantly different from zero
+
+    # calculate the percentiles of the skill score in case you want to compute the 95% confidence interval:
+    #my.FairRpssConf[,,,chunk$int[[c]]] <- apply(my.FairRpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+
+  } # close if on FairRpss
+
+  if(any(my.score.name=="FairCrpss")){
+
+    #my.FairCrpss.chunk <- veriApply("FairCrpss", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+
+    # Numerator:
+    my.FairCrps.chunk <- veriApply("FairCrps", fcst=anom.hindcast.chunk, obs=anom.rean.chunk, tdim=2, ensdim=1, parallel=FALSE, ncpus=1)
+    my.FairCrps.chunk.sum <- apply(my.FairCrps.chunk,c(2,3,4), sum)
+
+    # Denominator:
+    my.Crps.clim.chunk <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+    anom.hind.clim <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+
+    for(i in 1:n.leadtimes){
+      print(paste0("leadtime= ",i))
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          # extract a random sample with no replacement of the hindcast:
+          #anom.hind.clim[,,i,j,k] <- array(sample(anom.hindcast.chunk[,,i,j,k],n.startdates*n.yrs.hind*n.members),c(n.startdates*n.yrs.hind,n.members))
+          # use the full reanalysis record as the climatological ensemble of every verification time:
+          anom.rean.clim[,,i,j,k] <- t(array(anom.rean.chunk[,i,j,k],c(n.startdates*n.yrs.hind, n.startdates*n.yrs.hind)))
+          my.Crps.clim.chunk[,i,j,k] <- EnsCrps((anom.rean.clim[,,i,j,k]), anom.rean.chunk[,i,j,k])
+        }
+      }
+    }
+
+    ### anom.hind.clim <- apply(anom.hindcast.chunk,c(3,4,5), sample)
+    ### anom.all.chunk <- abind(anom.hind.clim,anom.rean.chunk, along=1) # merge exp and obs together to use apply:
+    ### my.Crps.clim.chunk <- apply(anom.all.chunk, c(3,4,5), function(x){ EnsCrps(t(x[1:n.members,]), x[n.members+1,])})
+
+    my.Crps.clim.chunk.sum <- apply(my.Crps.clim.chunk,c(2,3,4), sum)
+
+    ## Skill Score:
+    my.FairCrpss.chunk <- 1 - (my.FairCrps.chunk.sum / my.Crps.clim.chunk.sum)
+
+    #rm(anom.all.chunk, my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+
+    rm(my.FairCrps.chunk.sum, my.Crps.clim.chunk.sum)
+    gc()
+
+    ################################# bootstrapping ###########################################
+    my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) # define it only now, because for n.boot = 1000 it is already a 2 GB object!!!
+
+    for(b in 1:n.boot){
+      cat(paste0("Bootstrapping FairCrpss: resampling n. ",b,"/",n.boot,"\n"))
+
+      yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+      my.FairCrps.sampled <- my.FairCrps.chunk
+      my.Crps.clim.sampled <- my.Crps.clim.chunk
+
+      for(y in 1:n.yrs) my.FairCrps.sampled[y,,,] <- my.FairCrps.chunk[yrs.sampled[y],,,]
+      for(y in 1:n.yrs) my.Crps.clim.sampled[y,,,] <- my.Crps.clim.chunk[yrs.sampled[y],,,]
+
+      my.FairCrps.sampled.sum <- apply(my.FairCrps.sampled,c(2,3,4), sum)
+      my.Crps.clim.sampled.sum <- apply(my.Crps.clim.sampled,c(2,3,4), sum)
+
+      my.FairCrpss.sampled <- 1-(my.FairCrps.sampled.sum / my.Crps.clim.sampled.sum)
+
+      my.FairCrpssBoot[b,,,] <- my.FairCrpss.sampled
+
+      gc()
+    }
+
+    rm(my.FairCrps.sampled, my.Crps.clim.sampled, my.FairCrpss.sampled, yrs.sampled)
+    gc()
+
+    my.FairCrpssBootMean <- apply(my.FairCrpssBoot,c(2,3,4),mean,na.rm=T)
+
+    my.test.value <- 0 # my.FairCrpss.chunk # test if the bootstrapped mean value is significantly different from this value or not
+
+    my.FairCrpss.diff1 <- my.FairCrpssBootMean + abs(my.test.value - my.FairCrpssBootMean)
+    my.FairCrpss.diff2 <- my.FairCrpssBootMean - abs(my.test.value - my.FairCrpssBootMean)
+
+    for(i in 1:dim(my.FairCrpssBoot)[2]){
+      for(j in 1:dim(my.FairCrpssBoot)[3]){
+        for(k in 1:dim(my.FairCrpssBoot)[4]){
+          extr1 <- which(my.FairCrpssBoot[,i,j,k] > my.FairCrpss.diff1[i,j,k])
+          extr2 <- which(my.FairCrpssBoot[,i,j,k] < my.FairCrpss.diff2[i,j,k])
+          if(my.test.value != 0) my.FairCrpss.pvalue[i,j,k] <- (length(extr1) + length(extr2)) / n.boot
+          if(my.test.value == 0) my.FairCrpss.pvalue[i,j,k] <- ifelse(my.FairCrpssBootMean[i,j,k] > 0, length(extr2)/n.boot, length(extr1)/n.boot)
+
+        }
+      }
+    }
+
+    rm(my.FairCrpssBoot, my.FairCrpssBootMean, my.FairCrpss.diff1, my.FairCrpss.diff2, extr1, extr2)
+    ###which(my.FairCrpss.pvalue > 0.05) # points where the FairCRPSS is NOT significantly different from zero
+
+    # calculate the percentiles of the skill score if you want to measure the confidence interval:
+    #my.FairCrpssConf[,,,chunk$int[[c]]] <- apply(my.FairCrpssBoot[,,,chunk$int[[c]]], c(2,3,4), function(x) quantile(x, probs=conf.level, type=8))
+
+  } # close if on FairCrpss
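+
+  # A minimal commented sketch of the one-sided bootstrap test used in the two blocks above,
+  # on a toy vector of resampled scores (illustrative only; 'score.boot' is a hypothetical name):
+  # score.boot <- rnorm(1000, mean = 0.1) # bootstrap replicates of some skill score
+  # # when testing against 0, the p-value is the fraction of replicates lying on the far side
+  # # of 0 with respect to the bootstrap mean:
+  # p <- ifelse(mean(score.boot) > 0, mean(score.boot < 0), mean(score.boot > 0))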
+
+  if(any(my.score.name=="EnsCorr")){
+    # ensemble mean correlation for the selected startdates (first dim, 'week') and all leadtimes (second dim):
+    my.EnsCorr.chunk <- my.PValue.chunk <- array(NA, c(n.leadtimes, n.lat, n.lon))
+
+    for(i in 1:n.leadtimes){
+      for(j in 1:n.lat){
+        for(k in 1:n.lon){
+          my.EnsCorr.chunk[i,j,k] <- cor(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")
+          # option 'use' is chosen from the following list: c("all.obs", "complete.obs", "pairwise.complete.obs", "everything", "na.or.complete")
+
+          # (note that cor.test() has no 'use' argument, so below it is silently ignored; NAs would have to be removed beforehand)
+          my.EnsCorr.pvalue[i,j,k] <- cor.test(anom.hindcast.mean.chunk[,i,j,k],anom.rean.chunk[,i,j,k], use="complete.obs")$p.value
+
+        }
+      }
+    }
+
+    #my.EnsCorr[,,chunk$int[[c]]] <- my.EnsCorr.chunk
+    #my.PValue[,,chunk$int[[c]]] <- my.PValue.chunk
+
+    #rm(anom.hindcast.chunk,anom.hindcast.mean.chunk,my.EnsCorr.chunk,my.PValue.chunk)
+  }
+
+  #rm(anom.rean.chunk)
+  gc()
+  #mem()
+
+  # the Reliability Diagram cannot be computed by splitting the data into chunks, so it is computed only if the script is run from the R interface or with no arguments:
+
+  if(any(my.score.name=="RelDiagr")){ # in this case the computation cannot be split in groups of grid points, but it can be split by leadtime instead:
+    obs.chunk.prob <- ens.chunk.prob <- list()
+
+    int1 <- 1:(n.startdates*n.yrs.hind) # time interval of the data of the first tercile
+    int2 <- (1+(n.startdates*n.yrs.hind)):(2*n.startdates*n.yrs.hind) # time interval of the data of the second tercile
+    int3 <- (1+(2*n.startdates*n.yrs.hind)):(3*n.startdates*n.yrs.hind) # time interval of the data of the third tercile
+
+    for(lead in 1:n.leadtimes){
+      cat(paste0('leadtime n.: ',lead,'/',n.leadtimes,'\n'))
+
+      anom.hindcast.lead <- anom.hindcast.chunk[,,lead,,] # extract only the lead time we are interested in
+      anom.hindcast.chunk.perm <- aperm(anom.hindcast.lead,c(2,1,3,4)) # swap the first two dimensions to put the years as first dimension (needed for convert2prob)
+      gc()
+
+      cat('Computing the Reliability Diagram. Please wait... \n')
+      ens.chunk <- apply(anom.hindcast.chunk.perm, c(3,4), convert2prob, prob = my.prob) # its dimens. are: [n.startdates*n.yrs.hind*n.forecast.categories, n.lat, n.lon]
+      anom.rean.lead <- anom.rean.chunk[,lead,,]
+
+      cat('Computing the Reliability Diagram. Please wait...... \n')
+
+      obs.chunk <- apply(anom.rean.lead,c(2,3), convert2prob, prob = my.prob) # the first n.startdates*n.yrs.hind elements belong to the first tercile, and so on.
+      ens.chunk.prob[[lead]] <- apply(ens.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4)) # type=4 is the only method with sum(prob)=1 !!
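+
+      # a minimal commented sketch of the two-step conversion applied here, on a toy ensemble
+      # (assuming the SpecsVerification semantics: convert2prob() returns per-category counts
+      # and count2prob() turns counts into probabilities):
+      # ens.toy <- matrix(rnorm(20*11), 20, 11) # 20 forecasts x 11 members
+      # counts.toy <- convert2prob(ens.toy, prob = c(1/3,2/3)) # [20 x 3] members per tercile
+      # probs.toy <- count2prob(counts.toy, type = 4) # [20 x 3] probabilities, each row summing to 1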
+      cat('Computing the Reliability Diagram. Please wait......... \n')
+
+      obs.chunk.prob[[lead]] <- apply(obs.chunk, c(2,3), function(x) count2prob(matrix(x, n.startdates*n.yrs.hind, n.categ), type=4))
+
+      #print(my.RelDiagr) # for debugging
+      rm(ens.chunk, obs.chunk)
+      gc()
+
+    } # next lead
+
+  } # close if on RelDiagr
+
+  print("Saving results...")
+
+  # save the results for the chunk:
+  save(my.FairRpss.chunk, my.FairRpss.pvalue, my.FairCrpss.chunk, my.FairCrpss.pvalue, my.EnsCorr.chunk, my.EnsCorr.pvalue, obs.chunk.prob, ens.chunk.prob, int1, int2, int3, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  # if you only compute the RelDiagr, save this:
+  #save(obs.chunk.prob, ens.chunk.prob, int1, int2, int3, work.dir, rean.dir, rean.name, forecast.year, forecast.dir, cfs.name, var.name, var.name.map, mes, day, yr1.hind, yr2.hind, leadtime.week, n.members, my.prob, n.boot, n.categ, sdates.seq, n.leadtimes, n.yrs.hind, lons, lats, n.lon, n.lat, my.grid, veri.month, my.startdates, startdate.name, n.startdates, n.yrs, file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+  #if(any(my.score.name=="FairRpss")) save(my.FairRpss.chunk, file=paste0(work.dir,'/',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+  print("Saving results...done")
+
+} # next m (month)
+
+
+print("Finished!")
diff --git a/old/backup/SubSkillMaps.R b/old/backup/SubSkillMaps.R
new file mode 100644
index 0000000000000000000000000000000000000000..4115984521baec9dc7a9c9869739d30a9322104f
--- /dev/null
+++ b/old/backup/SubSkillMaps.R
@@ -0,0 +1,527 @@
+###########################################################################################
+#             Visualize and save the score maps for one score and period                 #
+###########################################################################################
+# run it only after computing and saving all the skill score data:
+
+rm(list=ls())
+
+##########################################################################################
+#                          Load functions and libraries                                  #
+##########################################################################################
+
+library(SpecsVerification) # for skill scores
+library(s2dverification)   # for Load() function
+library(abind)             # for merging arrays
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+source('/home/Earth/ncortesi/Downloads/ESS/general/hatching.R')
+##########################################################################################
+#                                 User's settings                                        #
+##########################################################################################
+#if(!mare) {source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')} else {source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')}
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Subestacional"
+
+var.name <- 'sfcWind' # forecast variable. It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps
+n.intervals <- 12 # in case of computing the Reliability Diagram, set the number of intervals of the forecasted frequency (X axis).
+                  # It is convenient to set it to the number of ensemble members + 1
+
+# Choose one or more regions among those defined below for the reliability diagrams and for the summary tables (0=World, 1=Northern Europe, etc.)
+areas <- 1:9
+
+land.only <- FALSE # set it to TRUE if you want to average only over land, FALSE otherwise [warning: the reliability diagram is computed ONLY over land+sea]
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+#my.lat <- 55.3197120
+#my.lon <- 0.5625
+
+my.months=1:12 #9:12 # choose the month(s) to plot and save
+
+# Define regions for averaging skill scores (in the format: lon.min/lon.max/lat.min/lat.max; lon.min MUST be a NEGATIVE number, lon values MUST go from 0 to 360, and
+# lat values MUST go from 90 to -90 degrees)
+area0.name <- "World"
+area0 <- c(-180,180,-90,90)
+area1.name <- "Northern_Europe"
+area1 <- c(-15,45,45,75)
+area2.name <- "Southern_Europe"
+area2 <- c(-15,45,35,45)
+area3.name <- "Southwestern_Europe"
+area3 <- c(-15,20,35,45)
+area4.name <- "Southeastern_Europe"
+area4 <- c(20,45,35,45)
+area5.name <- "Europe"
+area5 <- c(-15,45,35,75)
+area6.name <- "North_America"
+area6 <- c(-130,-60,30,50)
+area7.name <- "North_Sea"
+area7 <- c(-4, 15, 50, 65)
+area8.name <- "Iberian_Peninsula"
+area8 <- c(-10, 4, 36, 44)
+area9.name <- "Canadian_region"
+area9 <- c(-114, -111.8, 49.6, 51.7)
+
+#########################################################################################
+
+col <- c("#0C046E", "#1914BE", "#2341F7", "#2F55FB", "#3E64FF", "#528CFF", "#64AAFF", "#82C8FF", "#A0DCFF", "#B4F0FF", "#FFFBAF", "#FFDD9A", "#FFBF87", "#FFA173", "#FF7055", "#FE6346", "#F7403B", "#E92D36", "#C80F1E", "#A50519")
+
+# line color of Reliability Diagram summaries:
+col.month <- c('#e41a1c','#e41a1c','#e41a1c','#377eb8','#377eb8','#377eb8','#4daf4a','#4daf4a','#4daf4a','#984ea3','#984ea3','#984ea3')
+
+dir.create(path=paste0(work.dir,"/",var.name), showWarnings=FALSE) # if the directory already exists, this function doesn't create it again
+dir.create(path=paste0(work.dir,"/",var.name,"/formatted"), showWarnings=FALSE) # if the directory already exists, this function doesn't create it again
+
+##########################################################################################
+#                 visualize worldwide monthly maps of skill scores                       #
+##########################################################################################
+# choose one or more skill scores to visualize among ACC (EnsCorr), FairRPSS, FairCRPSS and/or RelDiagr:
+my.score.name <- c('FairRpss','FairCrpss','EnsCorr','RelDiagr')
+my.score.name.title <- c('FairRpss','FairCrpss','Correlation of the ensemble mean','Reliability Diagram') # names to be visualized in the graph titles
+
+for(area.num in areas){
+  ## replace old command area.name <- area0.name:
+  area <- eval(parse(text=paste0("area", area.num)))
+  area.name <- eval(parse(text=paste0("area", area.num,".name")))
+
+  print(paste0("Area: ",area.name))
+
+  ## position of the max and min lon and lat values of the chosen region:
+  lon.min <- 360 + area[1]
+  lon.max <- area[2]
+  lat.min <- area[3]
+  lat.max <- area[4]
+
+  for(month in my.months){
+    ## month=1 # for the debug
+    print(paste0("Month=",month))
+
+    ## Load data:
+    load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData'))
+
+    sdates.seq <- weekly.seq(forecast.year,mes,day) # sequence of dates corresponding to all the Thursdays of the year
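+    ## the start dates are "yyyymmdd" strings, so below substr(.., 5, 6) picks the month and
+    ## substr(.., 7, 8) the day of the month, e.g. (hypothetical date):
+    ## substr("20160107", 5, 6) # "01" -> January
+    ## substr("20160107", 7, 8) # "07" -> day 7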
as.integer(substr(sdates.seq[which(as.integer(substr(sdates.seq,5,6)) == month)],7,8)) #startdates.monthly[[month]] #c(1:5) + stringa <- paste0(my.startdates.days,collapse=",") + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one + if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + ##if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one + ##if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + area.lat <- 1:length(lats) + area.lon <- 1:length(lons) + + n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + + for(my.score.name.map in my.score.name){ + ##my.score.name.map='Acc' # for the debug + + print(paste0("Score=",my.score.name.map)) + + if(my.score.name.map=='FairRpss') my.score <- my.FairRpss.chunk + if(my.score.name.map=='FairCrpss') my.score <- my.FairCrpss.chunk + if(my.score.name.map=="EnsCorr") my.score <- my.EnsCorr.chunk + ##if(my.score.name.map=="RelDiagr") my.score <- my.RelDiagr + + ## old green-red palette: + ## brk.rpss<-c(-1,seq(0,1,by=0.05)) + ## col.rpss<-c("darkred",colorRampPalette(c("red","white","darkgreen"))(length(brk.rpss)-2)) + ## brk.rps<-c(seq(0,1,by=0.1),10) + ## col.rps<-c(colorRampPalette(c("darkgreen","white","red"))(length(brk.rps)-2),"darkred") + ## if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.brk<-brk.rps} else {my.brk<-brk.rpss} + ## if(my.score.name.map==my.Rps | my.score==my.Rps.clim) {my.col<-col.rps} else {my.col<-col.rpss} + + brk.rpss <- c(seq(-1,1,by=0.1)) + col.rpss <- colorRampPalette(col)(length(brk.rpss)-1) + + ## at present all breaks and colors are the same, so there is no need to differenciate between indexes, aside the two Fair scores whose range is [-inf,0] + my.brk <- brk.rpss + my.col <- col.rpss + my.brk.labels <- my.brk + if(my.score.name.map == 'FairRpss' | my.score.name.map=='FairCrpss') my.brk.labels <- c(expression(-infinity), my.brk[-1]) + + my.RelDiagr<-list() + + for(lead in 1:n.leadtimes){ + print(paste0("Leadtime=",lead)) + + ##my.title<-paste0(my.score.name.map,' of ',cfs.name,' + ##10m Wind Speed for ',startdate.name.map,'. 
Forecast time ',leadtime.week[lead],'.') + + if(my.score.name.map != "RelDiagr" && area.name == "World"){ + + ##postscript(file=paste0(workdir,'/Maps_',var.name,'/',my.score.name.map,'_',startdate.name.map,'_Forecast_time_',leadtime.week[lead],'_',var.name,'.ps'), + ## paper="special",width=12,height=7,horizontal=F) + + ##layout(matrix(c(1,2), 1, 2, byrow = TRUE),widths=c(11,1.5)) + ##par(oma=c(0,0,4,0),mar=c(0,0,0,0)) + + ##n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + ##hatching(latsr,c(lons[c((nlonr+1):nlon)]-360,lons[1:nlonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + ##hatching(lats,c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]), my.Pvalue.rev, dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + ##n.lonr <- n.lon - ceiling(length(lons[lons<180 & lons > 0])) + ##PlotEquiMap(my.score[lead,,][n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)], lons,lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + ## lat values must be in decreasing order from +90 to -90 degrees and long from 0 to 360 degrees: + + my.file <- paste0(work.dir,'/',var.name,'/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/',var.name,'_',my.score.name.map,'_',my.month[month],'_leadtime_',leadtime.week[lead],'.png') + png(file=my.file,width=1000,height=600) + + ## Map: + par(fig=c(0,0.92,0,0.89), new=TRUE) + + PlotEquiMap(my.score[lead,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats ,title_scale=0.8, brks=my.brk, cols=my.col ,axelab=F, filled.continents=FALSE, drawleg=F) + + if(my.score.name.map=="EnsCorr") my.PValue.rev <- my.EnsCorr.pvalue < 0.05 + if(my.score.name.map=="FairRpss") my.PValue.rev <- my.FairRpss.pvalue < 0.05 + if(my.score.name.map=="FairCrpss") my.PValue.rev <- my.FairCrpss.pvalue < 0.05 + + ## Draw the significance diagonal lines: + pv <- aperm(my.PValue.rev, c(1,3,2)) + + hatching(lats, c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), pv[lead,,], dens = 12, ang = 45, col_line = '#252525', lwd_size = 0.5, crosshatching = FALSE) + + ## Map title: + par(fig=c(0,1,0.82,0.91), new=TRUE) + ##my.title <- paste0(my.score.name.map,' of ',cfs.name,'\n ', var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / ", my.score.name.title[which(my.score.name == my.score.name.map)], "\n", my.month[month], " (",leadtime.week[lead]," days lead time)/ 1994-2013") + mtext(my.title, cex=2.4, font=2) + + ## Color legend: + par(fig=c(0.92,1,0,0.9), new=TRUE) + ##par(fig=c(0.52,0.82,0,0.9), new=TRUE) + + ColorBar2(my.brk, cols=my.col, vert=T, my.ticks=-0.5 + 1:length(my.brk), my.labels=my.brk.labels) + + ## arrange lat/ lon in my.PValue (a second variable in the EnsCorr file) to start from +90 degr. 
(lat) and from 0 degr (lon): + ## my.PValue.rev[lead,,] <- my.PValue[i,n.lat:1,c((n.lonr+1):n.lon,1:n.lonr)] + + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nReference dataset: ERA-Interim\nBias correction: none\nHatched area: significant from a bootstrapping test (p_value=0.05)' ", my.file," ", my.file2)) + + + } # close if on !RelDiagr + + gc() + + if(my.score.name.map == "RelDiagr") { + + ## Note that bins should correspond to the number of hindcast members + 1, for systems with a low number of members (below ~20) + ## Warning: if the bootstrap show a gray point instead of a gray bar, decrease the number of bins! + my.RelDiagr[[lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Below normal tercile + + ##my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int2,,], obs.chunk.prob[[lead]][int2,,], bins=5, nboot=0, plot=FALSE, plot.refin=F) + my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagramHist(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=n.intervals, nboot=500, plot=FALSE, plot.refin=TRUE) # Above Normale tercile + + + my.file <- paste0(work.dir,'/', var.name,'/', var.name,'_RelDiagr_', area.name,'_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + my.file2 <- paste0(work.dir,'/', var.name,'/formatted/', var.name,'_RelDiagr_', area.name, '_', my.month[month],'_leadtime_', leadtime.week[lead],'_', var.name,'.png') + + png(file=my.file,width=650,height=600) + + ##my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. 
Forecast time ',leadtime.week[lead],'.') + my.title <- paste0("ECMWF-MPS / ", var.name.map, " / Reliability Diagram \n", my.month[month], " (", leadtime.week[lead]," days lead time) / 1994-2013") + + par(fig=c(0, 0.83, 0, 1), new=TRUE) + + plot(c(0,1),c(0,1),type="l",xlab="Forecast probability (%)",ylab="Observed frequency (%)", xaxt='n', yaxt='n',col='gray30', main=my.title, cex.main=1.6) + + middle <- 100*round(my.RelDiagr[[lead]]$p.avgs,2) + interval <- (middle/2)[2] + left <- round(middle - interval,0) + left[1] <- 0 + right <- round(middle + interval,0) + right[l(right)] <- 100 + bins.label <- paste0(paste0(left,"-"),right) + + axis(1, at=my.RelDiagr[[lead]]$p.avgs, labels=FALSE) + text(x=my.RelDiagr[[lead]]$p.avgs, par("usr")[3]-0.03, labels=bins.label, srt=45, pos=1, xpd=TRUE) + axis(2, at=(0:10)/10, labels=10*(0:10), cex.axis=1) + + no_res <- 1/3 #sum(my.RelDiagr[[lead]]$obs.counts)/sum(my.RelDiagr[[lead]]$hist.counts) + numb <- c(seq(0,1,by=0.1)) + no_skill <- (numb+no_res)/2 + lines(c(0,1), c(no_res,no_res), col="gray", lty=3) + lines(c(1/3,1/3), c(0,1), col="gray", lty=3) + lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3) + + ##lines(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col="blue") + for (i in 1:n.intervals){ + lines(rep(my.RelDiagr[[lead]]$p.avgs[i], 2), c(my.RelDiagr[[lead]]$cbar.lo[i],my.RelDiagr[[lead]]$cbar.hi[i]), col="pink", lwd=2) + lines(0.005+rep(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[i], 2), c(my.RelDiagr[[2*n.leadtimes+lead]]$cbar.lo[i],my.RelDiagr[[2*n.leadtimes+lead]]$cbar.hi[i]), col="lightblue", lwd=2) + } + + + points(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="b", pch=1, col="red", cex=1.5, lwd=5) + + ##points(my.RelDiagr[[n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[n.leadtimes+lead]]$p.avgs)],my.RelDiagr[[n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[n.leadtimes+lead]]$cond.prob)],type="b", pch=11, col="gold", cex=1.5, lwd=5) # col="darkgreen" + + points(0.005+my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="b", pch=1, col="blue", cex=1.5, lwd=5) + + legend("bottomright", legend=c('Above Normal','Below Normal'), lty=c(1,1), lwd=c(2.5,2.5), col=c('blue','red')) + + par(fig=c(0.75, 1, 0, 0.35), new=TRUE) + perc1 <- 100*my.RelDiagr[[lead]]$hist.counts/sum(my.RelDiagr[[lead]]$hist.counts) + barplot(perc1, beside=T,space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "red", ylim=c(0,max(perc1)+5), main="Below Normal") + par(fig=c(0.75, 1, 0.34, 0.36), new=TRUE) + mtext("% of forecasts for bin") + + par(fig=c(0.75, 1, 0.25, 0.6), new=TRUE) + perc2 <- 100*my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts/sum(my.RelDiagr[[2*n.leadtimes+lead]]$hist.counts) + barplot(perc2, beside=T, space=c(0,1.2), axes = T, axis.lty=T, axisnames = T, col = "blue", ylim=c(0,max(perc2)+5), main="Above Normal") + par(fig=c(0.75, 1, 0.59, 0.61), new=TRUE) + mtext("% of forecasts for bin") + dev.off() + + system(paste0("~/scripts/fig2catalog.sh -c 'Start dates: ", stringa," of ",my.month[month],"\nLead time: ", leadtime.week[lead], " days\nRegion: ", area.name,"\nReference dataset: ERA-Interim \nBias correction: none' ", my.file," ", my.file2)) + } + + gc() + } # next lead + + gc() + } # next score + + gc() + } # next 
month + + + ############################################################################# + # Visualize summary tables for a region # + ############################################################################# + + ## If you use a mask, load it to average only over land and check that it has the same lat and lon positions of the score maps: + if(land.only == TRUE){ + load(paste0('/shared/earth/EarthSystemServices/TOOLS/Topography_bathymetry_and_masks/Seamask.', n.lon, '.', n.lat,'.50m','.RData')) + mask <- Seamask.512.256.50m + ##Coordinates repositioning and mask conditioning + mask <- mask[c((n.lonr+1):n.lon,1:n.lonr),n.lat:1] + mask <- ifelse(mask == 1, 0, 1) ## assign 1 to land and 0 to sea to plot the mask + mask <- t(mask) + myImagePlot(mask) + myImagePlot(my.FairRpss.chunk[1,,]) + mask <- ifelse(mask == 0, NA, 1) ## assign 1 to land and NA to sea to multiply it for the score values later + } + + ## plot a box with the chosen area (do it only once): + ## + ##png(file=paste0(work.dir,'/',var.name,'/Box_',area.name,'.png'),width=1400,height=800) + ##PlotEquiMap(my.FairRpss.chunk[1,,c((n.lonr+1):n.lon,1:n.lonr)], c(lons[c((n.lonr+1):n.lon)]-360, lons[1:n.lonr]), lats, brks=c(-1000,1000), cols="lightblue" , axelab=F, filled.continents=TRUE, drawleg=F, boxlim=c(area[1],area[3],area[2],area[4]), boxcol="black", boxlwd=.7) + ##dev.off() + + + array.EnsCorr <- array.FairRpss <- array.FairCrpss <- array(NA,c(12,4)) + + ## Load one file, just to take the lat and lon values: + load(file=paste0(work.dir,'/',var.name,'_',my.month[1],'.RData')) + + if(lon.min < 360) area.lon <- c(1:(which(lons >= lon.max)[1]), (which(lons >= lon.min)[1]:length(lons))) # restrict area to chosen one + if(lon.min > 360) area.lon <- (which(lons >= lon.min-360)[1]):(which(lons >= lon.max)[1]-1) + + if(area.name != "World") area.lat <- c(which(lats <= lat.max)[1]:(which(lats <= lat.min)[1]-1)) # restrict area to chosen one + if(area.name == "World"){ area.lat <- 1:length(lats); area.lon <- 1:length(lons) } + + ## Load all data and average it over the chosen region: + for(month in 1:12){ + + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + if(land.only == TRUE){ + ## remove sea values: + for(i in 1:4) my.EnsCorr.chunk[i,,] <- my.EnsCorr.chunk[i,,]*mask + for(i in 1:4) my.FairRpss.chunk[i,,] <- my.FairRpss.chunk[i,,]*mask + for(i in 1:4) my.FairCrpss.chunk[i,,] <- my.FairCrpss.chunk[i,,]*mask + } + + for(l in 0:3){ + ##load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + array.EnsCorr[month, 1+l] <- mean(my.EnsCorr.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairRpss[month, 1+l] <- mean(my.FairRpss.chunk[1+l, area.lat, area.lon], na.rm=T) + array.FairCrpss[month, 1+l] <- mean(my.FairCrpss.chunk[1+l, area.lat, area.lon], na.rm=T) + } + } + + + if(land.only == TRUE) { mod.filename <- "_land" } else { mod.filename <- "" } + + my.file <- paste0(work.dir,'/',var.name,'/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/Summary_',var.name,'_', area.name, mod.filename,'_with_letras.png') + + png(file=my.file,width=700,height=500) + + ##my.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + ##my.cols <- c('#0570b0','#bdc9e1', '#fff7ec', '#fee8c8','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') + ##my.cols <- 
c('#0570b0','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000') + ##my.cols <- c('#74a9cf','#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000') + my.cols <- c('#bdc9e1','#fef0d9','#fdd49e','#fdbb84','#fc8d59','#e34a33','#b30000', '#7f0000') + + ##my.seq <- c(-1,-0.6,-0.4,-0.2,0,0.2,0.4,0.6,1) + ##my.seq <- c(-0.2,-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6) + my.seq <- c(-0.1,0,0.1,0.2,0.3,0.4,0.5,0.6,0.7) + + ## create an array similar to array.pers but with colors instead of frequencies: + plot.new() + + ##par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + ##mtext("Subseasonal sfcWind 1994-2013", cex=1.8) + + par(mar=c(0,0,2,0), fig=c(0.07, 0.27, 0.9, 1), new=TRUE) + mtext("(a) EnsCorr", cex=1.5) + + par(mar=c(1,4,1,0), fig=c(0.02, 0.27, 0.1, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) + mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) + mtext(side = 2, text = "Start date", line = 3, cex=1.2) + axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + + array.colors <- array(my.cols[8],c(12,4)) + array.colors[array.EnsCorr < my.seq[8]] <- my.cols[7] + array.colors[array.EnsCorr < my.seq[7]] <- my.cols[6] + array.colors[array.EnsCorr < my.seq[6]] <- my.cols[5] + array.colors[array.EnsCorr < my.seq[5]] <- my.cols[4] + array.colors[array.EnsCorr < my.seq[4]] <- my.cols[3] + array.colors[array.EnsCorr < my.seq[3]] <- my.cols[2] + array.colors[array.EnsCorr < my.seq[2]] <- my.cols[1] + + for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + + par(mar=c(0,0,2,0), fig=c(0.32, 0.57, 0.9, 1), new=TRUE) + mtext("(b) FairRPSS", cex=1.5) + + par(mar=c(1,4,1,0), fig=c(0.32, 0.57, 0.1, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) + mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) + mtext(side = 2, text = "Start date", line = 3, cex=1.2) + axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + + array.colors <- array(my.cols[8],c(12,4)) + array.colors[array.FairRpss < my.seq[8]] <- my.cols[7] + array.colors[array.FairRpss < my.seq[7]] <- my.cols[6] + array.colors[array.FairRpss < my.seq[6]] <- my.cols[5] + array.colors[array.FairRpss < my.seq[5]] <- my.cols[4] + array.colors[array.FairRpss < my.seq[4]] <- my.cols[3] + array.colors[array.FairRpss < my.seq[3]] <- my.cols[2] + array.colors[array.FairRpss < my.seq[2]] <- my.cols[1] + + for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + + par(mar=c(0,0,2,0), fig=c(0.62, 0.87, 0.9, 1), new=TRUE) + mtext("(c) FairCRPSS", cex=1.5) + + par(mar=c(1,4,1,0), fig=c(0.62, 0.87, 0.1, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,3.2), ann=F) + mtext(side = 1, text = "Lead time (days)", line = 3.5, cex=1.2) + mtext(side = 2, text = "Start date", line = 3, cex=1.2) + axis(1, at=seq(0,3), las=3, cex.axis=1, labels=c("5-11","12-18","19-25","26-32"), mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=rev(my.month.short), cex.axis=1) + + array.colors <- array(my.cols[8],c(12,4)) + array.colors[array.FairCrpss < my.seq[8]] <- 
my.cols[7] + array.colors[array.FairCrpss < my.seq[7]] <- my.cols[6] + array.colors[array.FairCrpss < my.seq[6]] <- my.cols[5] + array.colors[array.FairCrpss < my.seq[5]] <- my.cols[4] + array.colors[array.FairCrpss < my.seq[4]] <- my.cols[3] + array.colors[array.FairCrpss < my.seq[3]] <- my.cols[2] + array.colors[array.FairCrpss < my.seq[2]] <- my.cols[1] + + for(p in 1:12){ for(l in 0:3){ polygon(c(0.5+l-1, 0.5+l-1, 1.5+l-1, 1.5+l-1), c(-0.5+13-p, 0.5+13-p, 0.5+13-p, -0.5+13-p), col=array.colors[p,1+l]) }} + + par(fig=c(0.89, 1, 0.1, 0.9), new=TRUE) + ##ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + ColorBar2(brks = my.seq, cols = my.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + gc() + + dev.off() + + ## Not checked but whould work: + ##system(paste0("~/scripts/fig2catalog.sh -r 50 -c 'Region: ", area.name, "\nReference dataset: ERA-Interim' ", my.file," ", my.file2)) + + + + + ## Monthly reliability diagrams, all over the same graph # + ## ONLY FOR SEA+LAND!!!! + + my.RelDiagr<-list() + + my.file <- paste0(work.dir,'/',var.name,'/Summary_RelDiagr_', area.name,'.png') + my.file2 <- paste0(work.dir,'/',var.name,'/formatted/','/Summary_RelDiagr_', area.name,'.png') + + ##n.lonr <- n.lon - ceiling(length(lons[lons>0 & lons < 180])) # count the number of longitude values between 180 and 360 degrees longitud + + png(file=my.file, width=1000, height=700) + plot(0, type = "n", axes = FALSE, ann = FALSE) + mtext(side = 3, text = "ECMWF-MFS / Reliability Diagram\nJanuary to December / 1994-2013", line = -1, cex=2.4, font=2) + par(mar=c(0,0,0,0) , fig=c(0, 1, 0, 0.4), new=TRUE) + legend(0.8,-0.8,0,0,legend=my.month[c(1,4,7,10)], lty=c(1,1), lwd=c(2.5,2.5), col=col.month[c(1,4,7,10)], ncol=4) + + par(mar=c(4.8,3.8,3.8,3.8) , fig=c(0, 1, 0, 0.4), new=TRUE) + + for(tercile in c(1,3)){ + print(paste0("Tercile: ",tercile)) + + my.lead <- (1:n.leadtimes) + (tercile-1) * 4 + + for(lead2 in my.lead){ + print(paste0("Lead time: ",lead2)) + + xmin <- 0.25 * ((lead2-1) %% 4) + xmax <- xmin + 0.25 + ymin <- 0.10 + 0.38 * (tercile-1)/2 + ymax <- ymin + 0.38 + + ##my.title <- paste0('Reliability Diagram of ',cfs.name,'\n ',var.name.map,' for ',my.month[month],'. Forecast time ',leadtime.week[lead],'.') + ##my.title <- paste0("ECMWF-S4 / ", var.name.map, " / Reliability Diagram \nJanuary to December / 1994-2013") + my.title <- paste0("Lead time: ", leadtime.week[1+((lead2-1) %% 4)], " days") + + par(fig=c(xmin, xmax, ymin, ymax), new=TRUE) + mod.subtitle <- ifelse(tercile == 1, "lower","upper") + plot(c(0,1),c(0,1),type="l",xlab=paste0("Forecast frequency ", mod.subtitle," tercile"),ylab="Observed frequency", col='gray30', main=my.title, cex.main=1.4) + + no_res <- 1/3 #sum(my.RelDiagr[[lead]]$obs.counts)/sum(my.RelDiagr[[lead]]$hist.counts) + numb <- c(seq(0,1,by=0.1)) + no_skill <- (numb+no_res)/2 + lines(c(0,1), c(no_res,no_res), col="gray", lty=3) + lines(c(1/3,1/3), c(0,1), col="gray", lty=3) + lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3) + + for(month in c(1,4,7,10)){ #my.months){ + ##month=1 # for the debug + ##print(paste0("Month=",month)) + + ## Load data: + load(file=paste0(work.dir,'/',var.name,'_',my.month[month],'.RData')) + + cat(paste0('Computing the Reliability Diagram for month ', month,'. Please wait... 
\n')) + + lead <- 1 + (lead2-1) %% 4 + + if(tercile == 1) my.RelDiagr[[lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int1,area.lat,area.lon], obs.chunk.prob[[lead]][int1,area.lat,area.lon], bins=n.intervals, nboot=0, plot=FALSE, plot.refin=F) #tercile 1 + + ##my.RelDiagr[[n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int2,area.lat,area.lon], obs.chunk.prob[[lead]][int2,area.lat,area.lon], bins=5, nboot=0, plot=FALSE, plot.refin=F) + + if(tercile == 3) my.RelDiagr[[2*n.leadtimes+lead]] <- ReliabilityDiagram(ens.chunk.prob[[lead]][int3,area.lat,area.lon], obs.chunk.prob[[lead]][int3,area.lat,area.lon], bins=n.intervals, nboot=0, plot=FALSE, plot.refin=F) + + + if(tercile == 1) points(my.RelDiagr[[lead]]$p.avgs[!is.na(my.RelDiagr[[lead]]$p.avgs)],my.RelDiagr[[lead]]$cond.prob[!is.na(my.RelDiagr[[lead]]$cond.prob)],type="o", pch=16, col=col.month[month], cex=.8, lwd=2) + + if(tercile == 3) points(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$p.avgs)], my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob[!is.na(my.RelDiagr[[2*n.leadtimes+lead]]$cond.prob)],type="o", pch=16, col=col.month[month], cex=.8, lwd=2) + + rm(obs.chunk.prob, ens.chunk.prob) + gc() + } # next month + + } # next lead + } # next tercile + + + dev.off() + + + +} # close for on area.num + diff --git a/old/backup/bash/.Rhistory b/old/backup/bash/.Rhistory new file mode 100644 index 0000000000000000000000000000000000000000..d4718b6fbc6906220cf4052b6f681f3c5cb80d67 --- /dev/null +++ b/old/backup/bash/.Rhistory @@ -0,0 +1,2 @@ +q() +n diff --git a/old/backup/bash/.directory b/old/backup/bash/.directory new file mode 100644 index 0000000000000000000000000000000000000000..4063b83af2e55ea734ea370010265002115961df --- /dev/null +++ b/old/backup/bash/.directory @@ -0,0 +1,4 @@ +[Dolphin] +Timestamp=2016,6,2,11,24,59 +Version=3 +ViewMode=2 diff --git a/old/backup/bash/check_ECMWFS4_psl_6hourly.txt b/old/backup/bash/check_ECMWFS4_psl_6hourly.txt new file mode 100644 index 0000000000000000000000000000000000000000..d7d45ceb70ebf97d92cdc760f06ef342da150606 --- /dev/null +++ b/old/backup/bash/check_ECMWFS4_psl_6hourly.txt @@ -0,0 +1,435 @@ +Checked variable: psl +Path: /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/ +Extension: .nc +Checked period: 1981-2015 +>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<< +Latitude denomination: latitude +Number of latitude values: 181 +Longitude denomination: longitude +Number of longitude values: 360 +Ensemble denomination: ensemble +Number of ensemble values: 15 +Alternative number of ensemble values: 51 +Lead-time denonomination: time +Number of lead-times: 216 + >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< +psl_19810101.nc : >>>>> Missing file <<<<< +psl_19810201.nc : >>>>> Missing file <<<<< +psl_19810301.nc : >>>>> Missing file <<<<< +psl_19810401.nc : >>>>> Missing file <<<<< +psl_19810501.nc : >>>>> Missing file <<<<< +psl_19810601.nc : >>>>> Missing file <<<<< +psl_19810701.nc : >>>>> Missing file <<<<< +psl_19810801.nc : >>>>> Missing file <<<<< +psl_19810901.nc : >>>>> Missing file <<<<< +psl_19811001.nc : >>>>> Missing file <<<<< +psl_19811101.nc : >>>>> Missing file <<<<< +psl_19811201.nc : >>>>> Missing file <<<<< +psl_19820101.nc : >>>>> Missing file <<<<< +psl_19820201.nc : >>>>> Missing file <<<<< +psl_19820301.nc : >>>>> Missing file <<<<< +psl_19820401.nc : >>>>> Missing file <<<<< +psl_19820501.nc : >>>>> Missing file <<<<< +psl_19820601.nc : >>>>> Missing file <<<<< +psl_19820701.nc : >>>>> 
Missing file <<<<< +psl_19820801.nc : >>>>> Missing file <<<<< +psl_19820901.nc : >>>>> Missing file <<<<< +psl_19821001.nc : >>>>> Missing file <<<<< +psl_19821101.nc : >>>>> Missing file <<<<< +psl_19821201.nc : >>>>> Missing file <<<<< +psl_19830101.nc : >>>>> Missing file <<<<< +psl_19830201.nc : >>>>> Missing file <<<<< +psl_19830301.nc : >>>>> Missing file <<<<< +psl_19830401.nc : >>>>> Missing file <<<<< +psl_19830501.nc : >>>>> Missing file <<<<< +psl_19830601.nc : >>>>> Missing file <<<<< +psl_19830701.nc : >>>>> Missing file <<<<< +psl_19830801.nc : >>>>> Missing file <<<<< +psl_19830901.nc : >>>>> Missing file <<<<< +psl_19831001.nc : >>>>> Missing file <<<<< +psl_19831101.nc : >>>>> Missing file <<<<< +psl_19831201.nc : >>>>> Missing file <<<<< +psl_19840101.nc : >>>>> Missing file <<<<< +psl_19840201.nc : >>>>> Missing file <<<<< +psl_19840301.nc : >>>>> Missing file <<<<< +psl_19840401.nc : >>>>> Missing file <<<<< +psl_19840501.nc : >>>>> Missing file <<<<< +psl_19840601.nc : >>>>> Missing file <<<<< +psl_19840701.nc : >>>>> Missing file <<<<< +psl_19840801.nc : >>>>> Missing file <<<<< +psl_19840901.nc : >>>>> Missing file <<<<< +psl_19841001.nc : >>>>> Missing file <<<<< +psl_19841101.nc : >>>>> Missing file <<<<< +psl_19841201.nc : >>>>> Missing file <<<<< +psl_19850101.nc : >>>>> Missing file <<<<< +psl_19850201.nc : >>>>> Missing file <<<<< +psl_19850301.nc : >>>>> Missing file <<<<< +psl_19850401.nc : >>>>> Missing file <<<<< +psl_19850501.nc : >>>>> Missing file <<<<< +psl_19850601.nc : >>>>> Missing file <<<<< +psl_19850701.nc : >>>>> Missing file <<<<< +psl_19850801.nc : >>>>> Missing file <<<<< +psl_19850901.nc : >>>>> Missing file <<<<< +psl_19851001.nc : >>>>> Missing file <<<<< +psl_19851101.nc : >>>>> Missing file <<<<< +psl_19851201.nc : >>>>> Missing file <<<<< +psl_19860101.nc : >>>>> Missing file <<<<< +psl_19860201.nc : >>>>> Missing file <<<<< +psl_19860301.nc : >>>>> Missing file <<<<< +psl_19860401.nc : >>>>> Missing file <<<<< +psl_19860501.nc : >>>>> Missing file <<<<< +psl_19860601.nc : >>>>> Missing file <<<<< +psl_19860701.nc : >>>>> Missing file <<<<< +psl_19860801.nc : >>>>> Missing file <<<<< +psl_19860901.nc : >>>>> Missing file <<<<< +psl_19861001.nc : >>>>> Missing file <<<<< +psl_19861101.nc : >>>>> Missing file <<<<< +psl_19861201.nc : >>>>> Missing file <<<<< +psl_19870101.nc : >>>>> Missing file <<<<< +psl_19870201.nc : >>>>> Missing file <<<<< +psl_19870301.nc : >>>>> Missing file <<<<< +psl_19870401.nc : >>>>> Missing file <<<<< +psl_19870501.nc : >>>>> Missing file <<<<< +psl_19870601.nc : >>>>> Missing file <<<<< +psl_19870701.nc : >>>>> Missing file <<<<< +psl_19870801.nc : >>>>> Missing file <<<<< +psl_19870901.nc : >>>>> Missing file <<<<< +psl_19871001.nc : >>>>> Missing file <<<<< +psl_19871101.nc : >>>>> Missing file <<<<< +psl_19871201.nc : >>>>> Missing file <<<<< +psl_19880101.nc : >>>>> Missing file <<<<< +psl_19880201.nc : >>>>> Missing file <<<<< +psl_19880301.nc : >>>>> Missing file <<<<< +psl_19880401.nc : >>>>> Missing file <<<<< +psl_19880501.nc : >>>>> Missing file <<<<< +psl_19880601.nc : >>>>> Missing file <<<<< +psl_19880701.nc : >>>>> Missing file <<<<< +psl_19880801.nc : >>>>> Missing file <<<<< +psl_19880901.nc : >>>>> Missing file <<<<< +psl_19881001.nc : >>>>> Missing file <<<<< +psl_19881101.nc : >>>>> Missing file <<<<< +psl_19881201.nc : >>>>> Missing file <<<<< +psl_19890101.nc : >>>>> Missing file <<<<< +psl_19890201.nc : >>>>> Missing file <<<<< +psl_19890301.nc : >>>>> Missing file <<<<< 
+psl_19890401.nc : >>>>> Missing file <<<<< +psl_19890501.nc : >>>>> Missing file <<<<< +psl_19890601.nc : >>>>> Missing file <<<<< +psl_19890701.nc : >>>>> Missing file <<<<< +psl_19890801.nc : >>>>> Missing file <<<<< +psl_19890901.nc : >>>>> Missing file <<<<< +psl_19891001.nc : >>>>> Missing file <<<<< +psl_19891101.nc : >>>>> Missing file <<<<< +psl_19891201.nc : >>>>> Missing file <<<<< +psl_19900101.nc : >>>>> Missing file <<<<< +psl_19900201.nc : >>>>> Missing file <<<<< +psl_19900301.nc : >>>>> Missing file <<<<< +psl_19900401.nc : >>>>> Missing file <<<<< +psl_19900501.nc : >>>>> Missing file <<<<< +psl_19900601.nc : >>>>> Missing file <<<<< +psl_19900701.nc : >>>>> Missing file <<<<< +psl_19900801.nc : >>>>> Missing file <<<<< +psl_19900901.nc : >>>>> Missing file <<<<< +psl_19901001.nc : >>>>> Missing file <<<<< +psl_19901101.nc : >>>>> Missing file <<<<< +psl_19901201.nc : >>>>> Missing file <<<<< +psl_19910101.nc : >>>>> Missing file <<<<< +psl_19910201.nc : >>>>> Missing file <<<<< +psl_19910301.nc : >>>>> Missing file <<<<< +psl_19910401.nc : >>>>> Missing file <<<<< +psl_19910501.nc : >>>>> Missing file <<<<< +psl_19910601.nc : >>>>> Missing file <<<<< +psl_19910701.nc : >>>>> Missing file <<<<< +psl_19910801.nc : >>>>> Missing file <<<<< +psl_19910901.nc : >>>>> Missing file <<<<< +psl_19911001.nc : >>>>> Missing file <<<<< +psl_19911101.nc : >>>>> Missing file <<<<< +psl_19911201.nc : >>>>> Missing file <<<<< +psl_19920101.nc : >>>>> Missing file <<<<< +psl_19920201.nc : >>>>> Missing file <<<<< +psl_19920301.nc : >>>>> Missing file <<<<< +psl_19920401.nc : >>>>> Missing file <<<<< +psl_19920501.nc : >>>>> Missing file <<<<< +psl_19920601.nc : >>>>> Missing file <<<<< +psl_19920701.nc : >>>>> Missing file <<<<< +psl_19920801.nc : >>>>> Missing file <<<<< +psl_19920901.nc : >>>>> Missing file <<<<< +psl_19921001.nc : >>>>> Missing file <<<<< +psl_19921101.nc : >>>>> Missing file <<<<< +psl_19921201.nc : >>>>> Missing file <<<<< +psl_19930101.nc : >>>>> Missing file <<<<< +psl_19930201.nc : >>>>> Missing file <<<<< +psl_19930301.nc : >>>>> Missing file <<<<< +psl_19930401.nc : >>>>> Missing file <<<<< +psl_19930501.nc : >>>>> Missing file <<<<< +psl_19930601.nc : >>>>> Missing file <<<<< +psl_19930701.nc : >>>>> Missing file <<<<< +psl_19930801.nc : >>>>> Missing file <<<<< +psl_19930901.nc : >>>>> Missing file <<<<< +psl_19931001.nc : >>>>> Missing file <<<<< +psl_19931101.nc : >>>>> Missing file <<<<< +psl_19931201.nc : >>>>> Missing file <<<<< +psl_19940101.nc : >>>>> Missing file <<<<< +psl_19940201.nc : >>>>> Missing file <<<<< +psl_19940301.nc : >>>>> Missing file <<<<< +psl_19940401.nc : >>>>> Missing file <<<<< +psl_19940501.nc : >>>>> Missing file <<<<< +psl_19940601.nc : >>>>> Missing file <<<<< +psl_19940701.nc : >>>>> Missing file <<<<< +psl_19940801.nc : >>>>> Missing file <<<<< +psl_19940901.nc : >>>>> Missing file <<<<< +psl_19941001.nc : >>>>> Missing file <<<<< +psl_19941101.nc : >>>>> Missing file <<<<< +psl_19941201.nc : >>>>> Missing file <<<<< +psl_19950101.nc : >>>>> Missing file <<<<< +psl_19950201.nc : >>>>> Missing file <<<<< +psl_19950301.nc : >>>>> Missing file <<<<< +psl_19950401.nc : >>>>> Missing file <<<<< +psl_19950501.nc : >>>>> Missing file <<<<< +psl_19950601.nc : >>>>> Missing file <<<<< +psl_19950701.nc : >>>>> Missing file <<<<< +psl_19950801.nc : >>>>> Missing file <<<<< +psl_19950901.nc : >>>>> Missing file <<<<< +psl_19951001.nc : >>>>> Missing file <<<<< +psl_19951101.nc : >>>>> Missing file <<<<< +psl_19951201.nc : >>>>> Missing 
file <<<<< +psl_19960101.nc : >>>>> Missing file <<<<< +psl_19960201.nc : >>>>> Missing file <<<<< +psl_19960301.nc : >>>>> Missing file <<<<< +psl_19960401.nc : >>>>> Missing file <<<<< +psl_19960501.nc : >>>>> Missing file <<<<< +psl_19960601.nc : >>>>> Missing file <<<<< +psl_19960701.nc : >>>>> Missing file <<<<< +psl_19960801.nc : >>>>> Missing file <<<<< +psl_19960901.nc : >>>>> Missing file <<<<< +psl_19961001.nc : >>>>> Missing file <<<<< +psl_19961101.nc : >>>>> Missing file <<<<< +psl_19961201.nc : >>>>> Missing file <<<<< +psl_19970101.nc : >>>>> Missing file <<<<< +psl_19970201.nc : >>>>> Missing file <<<<< +psl_19970301.nc : >>>>> Missing file <<<<< +psl_19970401.nc : >>>>> Missing file <<<<< +psl_19970501.nc : >>>>> Missing file <<<<< +psl_19970601.nc : >>>>> Missing file <<<<< +psl_19970701.nc : >>>>> Missing file <<<<< +psl_19970801.nc : >>>>> Missing file <<<<< +psl_19970901.nc : >>>>> Missing file <<<<< +psl_19971001.nc : >>>>> Missing file <<<<< +psl_19971101.nc : >>>>> Missing file <<<<< +psl_19971201.nc : >>>>> Missing file <<<<< +psl_19980101.nc : >>>>> Missing file <<<<< +psl_19980201.nc : >>>>> Missing file <<<<< +psl_19980301.nc : >>>>> Missing file <<<<< +psl_19980401.nc : >>>>> Missing file <<<<< +psl_19980501.nc : >>>>> Missing file <<<<< +psl_19980601.nc : >>>>> Missing file <<<<< +psl_19980701.nc : >>>>> Missing file <<<<< +psl_19980801.nc : >>>>> Missing file <<<<< +psl_19980901.nc : >>>>> Missing file <<<<< +psl_19981001.nc : >>>>> Missing file <<<<< +psl_19981101.nc : >>>>> Missing file <<<<< +psl_19981201.nc : >>>>> Missing file <<<<< +psl_19990101.nc : >>>>> Missing file <<<<< +psl_19990201.nc : >>>>> Missing file <<<<< +psl_19990301.nc : >>>>> Missing file <<<<< +psl_19990401.nc : >>>>> Missing file <<<<< +psl_19990501.nc : >>>>> Missing file <<<<< +psl_19990601.nc : >>>>> Missing file <<<<< +psl_19990701.nc : >>>>> Missing file <<<<< +psl_19990801.nc : >>>>> Missing file <<<<< +psl_19990901.nc : >>>>> Missing file <<<<< +psl_19991001.nc : >>>>> Missing file <<<<< +psl_19991101.nc : >>>>> Missing file <<<<< +psl_19991201.nc : >>>>> Missing file <<<<< +psl_20000101.nc : >>>>> Missing file <<<<< +psl_20000201.nc : >>>>> Missing file <<<<< +psl_20000301.nc : >>>>> Missing file <<<<< +psl_20000401.nc : >>>>> Missing file <<<<< +psl_20000501.nc : >>>>> Missing file <<<<< +psl_20000601.nc : >>>>> Missing file <<<<< +psl_20000701.nc : >>>>> Missing file <<<<< +psl_20000801.nc : >>>>> Missing file <<<<< +psl_20000901.nc : >>>>> Missing file <<<<< +psl_20001001.nc : >>>>> Missing file <<<<< +psl_20001101.nc : >>>>> Missing file <<<<< +psl_20001201.nc : >>>>> Missing file <<<<< +psl_20010101.nc : >>>>> Missing file <<<<< +psl_20010201.nc : >>>>> Missing file <<<<< +psl_20010301.nc : >>>>> Missing file <<<<< +psl_20010401.nc : >>>>> Missing file <<<<< +psl_20010501.nc : >>>>> Missing file <<<<< +psl_20010601.nc : >>>>> Missing file <<<<< +psl_20010701.nc : >>>>> Missing file <<<<< +psl_20010801.nc : >>>>> Missing file <<<<< +psl_20010901.nc : >>>>> Missing file <<<<< +psl_20011001.nc : >>>>> Missing file <<<<< +psl_20011101.nc : >>>>> Missing file <<<<< +psl_20011201.nc : >>>>> Missing file <<<<< +psl_20020101.nc : >>>>> Missing file <<<<< +psl_20020201.nc : >>>>> Missing file <<<<< +psl_20020301.nc : >>>>> Missing file <<<<< +psl_20020401.nc : >>>>> Missing file <<<<< +psl_20020501.nc : >>>>> Missing file <<<<< +psl_20020601.nc : >>>>> Missing file <<<<< +psl_20020701.nc : >>>>> Missing file <<<<< +psl_20020801.nc : >>>>> Missing file <<<<< +psl_20020901.nc : 
>>>>> Missing file <<<<< +psl_20021001.nc : >>>>> Missing file <<<<< +psl_20021101.nc : >>>>> Missing file <<<<< +psl_20021201.nc : >>>>> Missing file <<<<< +psl_20030101.nc : >>>>> Missing file <<<<< +psl_20030201.nc : >>>>> Missing file <<<<< +psl_20030301.nc : >>>>> Missing file <<<<< +psl_20030401.nc : >>>>> Missing file <<<<< +psl_20030501.nc : >>>>> Missing file <<<<< +psl_20030601.nc : >>>>> Missing file <<<<< +psl_20030701.nc : >>>>> Missing file <<<<< +psl_20030801.nc : >>>>> Missing file <<<<< +psl_20030901.nc : >>>>> Missing file <<<<< +psl_20031001.nc : >>>>> Missing file <<<<< +psl_20031101.nc : >>>>> Missing file <<<<< +psl_20031201.nc : >>>>> Missing file <<<<< +psl_20040101.nc : >>>>> Missing file <<<<< +psl_20040201.nc : >>>>> Missing file <<<<< +psl_20040301.nc : >>>>> Missing file <<<<< +psl_20040401.nc : >>>>> Missing file <<<<< +psl_20040501.nc : >>>>> Missing file <<<<< +psl_20040601.nc : >>>>> Missing file <<<<< +psl_20040701.nc : >>>>> Missing file <<<<< +psl_20040801.nc : >>>>> Missing file <<<<< +psl_20040901.nc : >>>>> Missing file <<<<< +psl_20041001.nc : >>>>> Missing file <<<<< +psl_20041101.nc : >>>>> Missing file <<<<< +psl_20041201.nc : >>>>> Missing file <<<<< +psl_20050101.nc : >>>>> Missing file <<<<< +psl_20050201.nc : >>>>> Missing file <<<<< +psl_20050301.nc : >>>>> Missing file <<<<< +psl_20050401.nc : >>>>> Missing file <<<<< +psl_20050501.nc : >>>>> Missing file <<<<< +psl_20050601.nc : >>>>> Missing file <<<<< +psl_20050701.nc : >>>>> Missing file <<<<< +psl_20050801.nc : >>>>> Missing file <<<<< +psl_20050901.nc : >>>>> Missing file <<<<< +psl_20051001.nc : >>>>> Missing file <<<<< +psl_20051101.nc : >>>>> Missing file <<<<< +psl_20051201.nc : >>>>> Missing file <<<<< +psl_20060101.nc : >>>>> Missing file <<<<< +psl_20060201.nc : >>>>> Missing file <<<<< +psl_20060301.nc : >>>>> Missing file <<<<< +psl_20060401.nc : >>>>> Missing file <<<<< +psl_20060501.nc : >>>>> Missing file <<<<< +psl_20060601.nc : >>>>> Missing file <<<<< +psl_20060701.nc : >>>>> Missing file <<<<< +psl_20060801.nc : >>>>> Missing file <<<<< +psl_20060901.nc : >>>>> Missing file <<<<< +psl_20061001.nc : >>>>> Missing file <<<<< +psl_20061101.nc : >>>>> Missing file <<<<< +psl_20061201.nc : >>>>> Missing file <<<<< +psl_20070101.nc : >>>>> Missing file <<<<< +psl_20070201.nc : >>>>> Missing file <<<<< +psl_20070301.nc : >>>>> Missing file <<<<< +psl_20070401.nc : >>>>> Missing file <<<<< +psl_20070501.nc : >>>>> Missing file <<<<< +psl_20070601.nc : >>>>> Missing file <<<<< +psl_20070701.nc : >>>>> Missing file <<<<< +psl_20070801.nc : >>>>> Missing file <<<<< +psl_20070901.nc : >>>>> Missing file <<<<< +psl_20071001.nc : >>>>> Missing file <<<<< +psl_20071101.nc : >>>>> Missing file <<<<< +psl_20071201.nc : >>>>> Missing file <<<<< +psl_20080101.nc : >>>>> Missing file <<<<< +psl_20080201.nc : >>>>> Missing file <<<<< +psl_20080301.nc : >>>>> Missing file <<<<< +psl_20080401.nc : >>>>> Missing file <<<<< +psl_20080501.nc : >>>>> Missing file <<<<< +psl_20080601.nc : >>>>> Missing file <<<<< +psl_20080701.nc : >>>>> Missing file <<<<< +psl_20080801.nc : >>>>> Missing file <<<<< +psl_20080901.nc : >>>>> Missing file <<<<< +psl_20081001.nc : >>>>> Missing file <<<<< +psl_20081101.nc : >>>>> Missing file <<<<< +psl_20081201.nc : >>>>> Missing file <<<<< +psl_20090101.nc : >>>>> Missing file <<<<< +psl_20090201.nc : >>>>> Missing file <<<<< +psl_20090301.nc : >>>>> Missing file <<<<< +psl_20090401.nc : >>>>> Missing file <<<<< +psl_20090501.nc : >>>>> Missing file <<<<< 
+psl_20090601.nc : >>>>> Missing file <<<<< +psl_20090701.nc : >>>>> Missing file <<<<< +psl_20090801.nc : >>>>> Missing file <<<<< +psl_20090901.nc : >>>>> Missing file <<<<< +psl_20091001.nc : >>>>> Missing file <<<<< +psl_20091101.nc : >>>>> Missing file <<<<< +psl_20091201.nc : >>>>> Missing file <<<<< +psl_20100101.nc : >>>>> Missing file <<<<< +psl_20100201.nc : >>>>> Missing file <<<<< +psl_20100301.nc : >>>>> Missing file <<<<< +psl_20100401.nc : >>>>> Missing file <<<<< +psl_20100501.nc : >>>>> Missing file <<<<< +psl_20100601.nc : >>>>> Missing file <<<<< +psl_20100701.nc : >>>>> Missing file <<<<< +psl_20100801.nc : >>>>> Missing file <<<<< +psl_20100901.nc : >>>>> Missing file <<<<< +psl_20101001.nc : >>>>> Missing file <<<<< +psl_20101101.nc : >>>>> Missing file <<<<< +psl_20101201.nc : >>>>> Missing file <<<<< +psl_20110101.nc : >>>>> Missing file <<<<< +psl_20110201.nc : >>>>> Missing file <<<<< +psl_20110301.nc : >>>>> Missing file <<<<< +psl_20110401.nc : >>>>> Missing file <<<<< +psl_20110501.nc : >>>>> Missing file <<<<< +psl_20110601.nc : >>>>> Missing file <<<<< +psl_20110701.nc : >>>>> Missing file <<<<< +psl_20110801.nc : >>>>> Missing file <<<<< +psl_20110901.nc : >>>>> Missing file <<<<< +psl_20111001.nc : >>>>> Missing file <<<<< +psl_20111101.nc : >>>>> Missing file <<<<< +psl_20111201.nc : >>>>> Missing file <<<<< +psl_20120101.nc : >>>>> Missing file <<<<< +psl_20120201.nc : >>>>> Missing file <<<<< +psl_20120301.nc : >>>>> Missing file <<<<< +psl_20120401.nc : >>>>> Missing file <<<<< +psl_20120501.nc : >>>>> Missing file <<<<< +psl_20120601.nc : >>>>> Missing file <<<<< +psl_20120701.nc : >>>>> Missing file <<<<< +psl_20120801.nc : >>>>> Missing file <<<<< +psl_20120901.nc : >>>>> Missing file <<<<< +psl_20121001.nc : >>>>> Missing file <<<<< +psl_20121101.nc : >>>>> Missing file <<<<< +psl_20121201.nc : >>>>> Missing file <<<<< +psl_20130101.nc : >>>>> Missing file <<<<< +psl_20130201.nc : >>>>> Missing file <<<<< +psl_20130301.nc : >>>>> Missing file <<<<< +psl_20130401.nc : >>>>> Missing file <<<<< +psl_20130501.nc : >>>>> Missing file <<<<< +psl_20130601.nc : >>>>> Missing file <<<<< +psl_20130701.nc : >>>>> Missing file <<<<< +psl_20130801.nc : >>>>> Missing file <<<<< +psl_20130901.nc : >>>>> Missing file <<<<< +psl_20131001.nc : >>>>> Missing file <<<<< +psl_20131101.nc : >>>>> Missing file <<<<< +psl_20131201.nc : >>>>> Missing file <<<<< +psl_20140101.nc : >>>>> Missing file <<<<< +psl_20140201.nc : >>>>> Missing file <<<<< +psl_20140301.nc : >>>>> Missing file <<<<< +psl_20140401.nc : >>>>> Missing file <<<<< +psl_20140501.nc : >>>>> Missing file <<<<< +psl_20140601.nc : >>>>> Missing file <<<<< +psl_20140701.nc : >>>>> Missing file <<<<< +psl_20140801.nc : >>>>> Missing file <<<<< +psl_20140901.nc : >>>>> Missing file <<<<< +psl_20141001.nc : >>>>> Missing file <<<<< +psl_20141101.nc : >>>>> Missing file <<<<< +psl_20141201.nc : >>>>> Missing file <<<<< +psl_20150101.nc : >>>>> Missing file <<<<< +psl_20150201.nc : >>>>> Missing file <<<<< +psl_20150301.nc : >>>>> Missing file <<<<< +psl_20150401.nc : >>>>> Missing file <<<<< +psl_20150501.nc : >>>>> Missing file <<<<< +psl_20150601.nc : >>>>> Missing file <<<<< +psl_20150701.nc : >>>>> Missing file <<<<< +psl_20150801.nc : >>>>> Missing file <<<<< +psl_20150901.nc : >>>>> Missing file <<<<< +psl_20151001.nc : >>>>> Missing file <<<<< +psl_20151101.nc : >>>>> Missing file <<<<< +psl_20151201.nc : >>>>> Missing file <<<<< diff --git a/old/backup/bash/check_ECMWFS4_sfcWind_6hourly.txt 
b/old/backup/bash/check_ECMWFS4_sfcWind_6hourly.txt new file mode 100644 index 0000000000000000000000000000000000000000..213ee2a6dbc81158f145746abb1e8ce2bb6bec7c --- /dev/null +++ b/old/backup/bash/check_ECMWFS4_sfcWind_6hourly.txt @@ -0,0 +1,783 @@ +Checked variable: sfcWind +Path: /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/ +Extension: .nc +Checked period: 1981-2015 +>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<< +Latitude denomination: latitude +Number of latitude values: 181 +Longitude denomination: longitude +Number of longitude values: 360 +Ensemble denomination: ensemble +Number of ensemble values: 15 +Alternative number of ensemble values: 51 +Lead-time denonomination: time +Number of lead-times: 216 + >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< +sfcWind_19810101.nc : there is no dimension called ensemble inside this file +sfcWind_19810201.nc : there is no dimension called latitude inside this file +sfcWind_19810201.nc : there is no dimension called longitude inside this file +sfcWind_19810201.nc : there is no dimension called ensemble inside this file +sfcWind_19810201.nc : time dimension has 10812 values instead of 216 +sfcWind_19810301.nc : there is no dimension called ensemble inside this file +sfcWind_19810401.nc : there is no dimension called ensemble inside this file +sfcWind_19810501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19810501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19810501.nc : there is no dimension called ensemble inside this file +sfcWind_19810501.nc : there is no dimension called time inside this file +sfcWind_19810601.nc : there is no dimension called ensemble inside this file +sfcWind_19810701.nc : there is no dimension called ensemble inside this file +sfcWind_19810801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19810801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19810801.nc : there is no dimension called ensemble inside this file +sfcWind_19810801.nc : time dimension has 212 values instead of 216 +sfcWind_19810901.nc : there is no dimension called ensemble inside this file +sfcWind_19811001.nc : there is no dimension called ensemble inside this file +sfcWind_19811101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19811101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19811101.nc : there is no dimension called ensemble inside this file +sfcWind_19811101.nc : there is no dimension called time inside this file +sfcWind_19811201.nc : there is no dimension called ensemble inside this file +sfcWind_19820101.nc : there is no dimension called ensemble inside this file +sfcWind_19820201.nc : there is no dimension called latitude inside this file +sfcWind_19820201.nc : there is no dimension called longitude inside this file +sfcWind_19820201.nc : there is no dimension called ensemble inside this file +sfcWind_19820201.nc : time dimension has 10812 values instead of 216 +sfcWind_19820301.nc : there is no dimension called ensemble inside this file +sfcWind_19820401.nc : there is no dimension called ensemble inside this file +sfcWind_19820501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19820501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19820501.nc : there is no dimension called ensemble inside this file +sfcWind_19820501.nc : there is no dimension called time inside this file +sfcWind_19820601.nc : there is no dimension called ensemble inside this file 
+sfcWind_19820701.nc : there is no dimension called ensemble inside this file +sfcWind_19820701.nc : time dimension has 31 values instead of 216 +sfcWind_19820801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19820801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19820801.nc : there is no dimension called ensemble inside this file +sfcWind_19820801.nc : time dimension has 212 values instead of 216 +sfcWind_19820901.nc : there is no dimension called ensemble inside this file +sfcWind_19821001.nc : there is no dimension called ensemble inside this file +sfcWind_19821101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19821101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19821101.nc : there is no dimension called ensemble inside this file +sfcWind_19821101.nc : there is no dimension called time inside this file +sfcWind_19821201.nc : there is no dimension called ensemble inside this file +sfcWind_19830101.nc : there is no dimension called ensemble inside this file +sfcWind_19830201.nc : there is no dimension called latitude inside this file +sfcWind_19830201.nc : there is no dimension called longitude inside this file +sfcWind_19830201.nc : there is no dimension called ensemble inside this file +sfcWind_19830201.nc : time dimension has 10812 values instead of 216 +sfcWind_19830301.nc : there is no dimension called ensemble inside this file +sfcWind_19830401.nc : there is no dimension called ensemble inside this file +sfcWind_19830501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19830501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19830501.nc : there is no dimension called ensemble inside this file +sfcWind_19830501.nc : there is no dimension called time inside this file +sfcWind_19830601.nc : there is no dimension called ensemble inside this file +sfcWind_19830701.nc : there is no dimension called ensemble inside this file +sfcWind_19830801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19830801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19830801.nc : there is no dimension called ensemble inside this file +sfcWind_19830801.nc : time dimension has 213 values instead of 216 +sfcWind_19830901.nc : there is no dimension called ensemble inside this file +sfcWind_19831001.nc : there is no dimension called ensemble inside this file +sfcWind_19831101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19831101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19831101.nc : there is no dimension called ensemble inside this file +sfcWind_19831101.nc : there is no dimension called time inside this file +sfcWind_19831201.nc : there is no dimension called ensemble inside this file +sfcWind_19840101.nc : there is no dimension called ensemble inside this file +sfcWind_19840101.nc : time dimension has 9 values instead of 216 +sfcWind_19840201.nc : there is no dimension called latitude inside this file +sfcWind_19840201.nc : there is no dimension called longitude inside this file +sfcWind_19840201.nc : there is no dimension called ensemble inside this file +sfcWind_19840201.nc : time dimension has 10863 values instead of 216 +sfcWind_19840301.nc : there is no dimension called ensemble inside this file +sfcWind_19840401.nc : there is no dimension called ensemble inside this file +sfcWind_19840501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19840501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19840501.nc : 
there is no dimension called ensemble inside this file +sfcWind_19840501.nc : there is no dimension called time inside this file +sfcWind_19840601.nc : there is no dimension called ensemble inside this file +sfcWind_19840701.nc : there is no dimension called ensemble inside this file +sfcWind_19840801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19840801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19840801.nc : there is no dimension called ensemble inside this file +sfcWind_19840801.nc : time dimension has 212 values instead of 216 +sfcWind_19840901.nc : there is no dimension called ensemble inside this file +sfcWind_19841001.nc : there is no dimension called ensemble inside this file +sfcWind_19841101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19841101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19841101.nc : there is no dimension called ensemble inside this file +sfcWind_19841101.nc : there is no dimension called time inside this file +sfcWind_19841201.nc : there is no dimension called ensemble inside this file +sfcWind_19850101.nc : there is no dimension called ensemble inside this file +sfcWind_19850201.nc : there is no dimension called latitude inside this file +sfcWind_19850201.nc : there is no dimension called longitude inside this file +sfcWind_19850201.nc : there is no dimension called ensemble inside this file +sfcWind_19850201.nc : time dimension has 10812 values instead of 216 +sfcWind_19850301.nc : there is no dimension called ensemble inside this file +sfcWind_19850401.nc : there is no dimension called ensemble inside this file +sfcWind_19850501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19850501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19850501.nc : there is no dimension called ensemble inside this file +sfcWind_19850501.nc : there is no dimension called time inside this file +sfcWind_19850601.nc : there is no dimension called latitude inside this file +sfcWind_19850601.nc : there is no dimension called longitude inside this file +sfcWind_19850601.nc : there is no dimension called ensemble inside this file +sfcWind_19850601.nc : there is no dimension called time inside this file +sfcWind_19850701.nc : there is no dimension called ensemble inside this file +sfcWind_19850801.nc : >>>>> Missing file <<<<< +sfcWind_19850901.nc : there is no dimension called ensemble inside this file +sfcWind_19850901.nc : time dimension has 226 values instead of 216 +sfcWind_19851001.nc : there is no dimension called ensemble inside this file +sfcWind_19851101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19851101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19851101.nc : there is no dimension called ensemble inside this file +sfcWind_19851101.nc : time dimension has 214 values instead of 216 +sfcWind_19851201.nc : there is no dimension called ensemble inside this file +sfcWind_19860101.nc : there is no dimension called ensemble inside this file +sfcWind_19860101.nc : time dimension has 23 values instead of 216 +sfcWind_19860201.nc : there is no dimension called latitude inside this file +sfcWind_19860201.nc : there is no dimension called longitude inside this file +sfcWind_19860201.nc : there is no dimension called ensemble inside this file +sfcWind_19860201.nc : time dimension has 10812 values instead of 216 +sfcWind_19860301.nc : there is no dimension called ensemble inside this file +sfcWind_19860301.nc : time dimension has 285 values 
instead of 216 +sfcWind_19860401.nc : there is no dimension called ensemble inside this file +sfcWind_19860501.nc : >>>>> Missing file <<<<< +sfcWind_19860601.nc : there is no dimension called ensemble inside this file +sfcWind_19860701.nc : there is no dimension called ensemble inside this file +sfcWind_19860801.nc : >>>>> Missing file <<<<< +sfcWind_19860901.nc : there is no dimension called ensemble inside this file +sfcWind_19861001.nc : there is no dimension called ensemble inside this file +sfcWind_19861001.nc : time dimension has 94 values instead of 216 +sfcWind_19861101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19861101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19861101.nc : there is no dimension called ensemble inside this file +sfcWind_19861101.nc : time dimension has 214 values instead of 216 +sfcWind_19861201.nc : there is no dimension called ensemble inside this file +sfcWind_19870101.nc : there is no dimension called ensemble inside this file +sfcWind_19870201.nc : there is no dimension called latitude inside this file +sfcWind_19870201.nc : there is no dimension called longitude inside this file +sfcWind_19870201.nc : there is no dimension called ensemble inside this file +sfcWind_19870201.nc : time dimension has 10812 values instead of 216 +sfcWind_19870301.nc : there is no dimension called ensemble inside this file +sfcWind_19870401.nc : there is no dimension called ensemble inside this file +sfcWind_19870501.nc : >>>>> Missing file <<<<< +sfcWind_19870601.nc : there is no dimension called ensemble inside this file +sfcWind_19870701.nc : there is no dimension called ensemble inside this file +sfcWind_19870801.nc : >>>>> Missing file <<<<< +sfcWind_19870901.nc : there is no dimension called ensemble inside this file +sfcWind_19871001.nc : there is no dimension called ensemble inside this file +sfcWind_19871101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19871101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19871101.nc : there is no dimension called ensemble inside this file +sfcWind_19871101.nc : time dimension has 214 values instead of 216 +sfcWind_19871201.nc : there is no dimension called ensemble inside this file +sfcWind_19871201.nc : time dimension has 46 values instead of 216 +sfcWind_19880101.nc : there is no dimension called ensemble inside this file +sfcWind_19880201.nc : there is no dimension called latitude inside this file +sfcWind_19880201.nc : there is no dimension called longitude inside this file +sfcWind_19880201.nc : there is no dimension called ensemble inside this file +sfcWind_19880201.nc : time dimension has 10863 values instead of 216 +sfcWind_19880301.nc : there is no dimension called ensemble inside this file +sfcWind_19880401.nc : there is no dimension called ensemble inside this file +sfcWind_19880501.nc : >>>>> Missing file <<<<< +sfcWind_19880601.nc : there is no dimension called ensemble inside this file +sfcWind_19880701.nc : there is no dimension called ensemble inside this file +sfcWind_19880801.nc : >>>>> Missing file <<<<< +sfcWind_19880901.nc : there is no dimension called ensemble inside this file +sfcWind_19881001.nc : there is no dimension called ensemble inside this file +sfcWind_19881101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19881101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19881101.nc : there is no dimension called ensemble inside this file +sfcWind_19881101.nc : time dimension has 214 values instead of 
216
+sfcWind_19881201.nc : there is no dimension called ensemble inside this file
+sfcWind_19890101.nc : there is no dimension called ensemble inside this file
+sfcWind_19890201.nc : there is no dimension called latitude inside this file
+sfcWind_19890201.nc : there is no dimension called longitude inside this file
+sfcWind_19890201.nc : there is no dimension called ensemble inside this file
+sfcWind_19890201.nc : time dimension has 10812 values instead of 216
+sfcWind_19890301.nc : there is no dimension called ensemble inside this file
+sfcWind_19890401.nc : there is no dimension called ensemble inside this file
+sfcWind_19890501.nc : >>>>> Missing file <<<<<
+sfcWind_19890601.nc : there is no dimension called ensemble inside this file
+sfcWind_19890701.nc : there is no dimension called ensemble inside this file
+sfcWind_19890801.nc : >>>>> Missing file <<<<<
+sfcWind_19890901.nc : there is no dimension called ensemble inside this file
+sfcWind_19891001.nc : there is no dimension called ensemble inside this file
+sfcWind_19891101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19891101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19891101.nc : there is no dimension called ensemble inside this file
+sfcWind_19891101.nc : time dimension has 214 values instead of 216
+sfcWind_19891201.nc : there is no dimension called ensemble inside this file
+sfcWind_19900101.nc : there is no dimension called ensemble inside this file
+sfcWind_19900201.nc : there is no dimension called latitude inside this file
+sfcWind_19900201.nc : there is no dimension called longitude inside this file
+sfcWind_19900201.nc : there is no dimension called ensemble inside this file
+sfcWind_19900201.nc : time dimension has 10812 values instead of 216
+sfcWind_19900301.nc : there is no dimension called ensemble inside this file
+sfcWind_19900401.nc : >>>>> Missing file <<<<<
+sfcWind_19900501.nc : >>>>> Missing file <<<<<
+sfcWind_19900601.nc : there is no dimension called ensemble inside this file
+sfcWind_19900701.nc : >>>>> Missing file <<<<<
+sfcWind_19900801.nc : >>>>> Missing file <<<<<
+sfcWind_19900901.nc : there is no dimension called ensemble inside this file
+sfcWind_19900901.nc : time dimension has 2 values instead of 216
+sfcWind_19901001.nc : there is no dimension called ensemble inside this file
+sfcWind_19901001.nc : time dimension has 120 values instead of 216
+sfcWind_19901101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19901101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19901101.nc : there is no dimension called ensemble inside this file
+sfcWind_19901101.nc : time dimension has 214 values instead of 216
+sfcWind_19901201.nc : there is no dimension called ensemble inside this file
+sfcWind_19910101.nc : there is no dimension called ensemble inside this file
+sfcWind_19910201.nc : there is no dimension called latitude inside this file
+sfcWind_19910201.nc : there is no dimension called longitude inside this file
+sfcWind_19910201.nc : there is no dimension called ensemble inside this file
+sfcWind_19910201.nc : time dimension has 10812 values instead of 216
+sfcWind_19910301.nc : there is no dimension called ensemble inside this file
+sfcWind_19910401.nc : there is no dimension called ensemble inside this file
+sfcWind_19910501.nc : >>>>> Missing file <<<<<
+sfcWind_19910601.nc : there is no dimension called ensemble inside this file
+sfcWind_19910701.nc : there is no dimension called ensemble inside this file
+sfcWind_19910801.nc : >>>>> Missing file <<<<<
+sfcWind_19910901.nc : there is no dimension called ensemble inside this file
+sfcWind_19911001.nc : there is no dimension called ensemble inside this file
+sfcWind_19911001.nc : time dimension has 117 values instead of 216
+sfcWind_19911101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19911101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19911101.nc : there is no dimension called ensemble inside this file
+sfcWind_19911101.nc : time dimension has 214 values instead of 216
+sfcWind_19911201.nc : there is no dimension called ensemble inside this file
+sfcWind_19920101.nc : there is no dimension called ensemble inside this file
+sfcWind_19920201.nc : there is no dimension called latitude inside this file
+sfcWind_19920201.nc : there is no dimension called longitude inside this file
+sfcWind_19920201.nc : there is no dimension called ensemble inside this file
+sfcWind_19920201.nc : time dimension has 10863 values instead of 216
+sfcWind_19920301.nc : there is no dimension called ensemble inside this file
+sfcWind_19920401.nc : there is no dimension called ensemble inside this file
+sfcWind_19920501.nc : >>>>> Missing file <<<<<
+sfcWind_19920601.nc : there is no dimension called ensemble inside this file
+sfcWind_19920601.nc : time dimension has 188 values instead of 216
+sfcWind_19920701.nc : there is no dimension called ensemble inside this file
+sfcWind_19920801.nc : >>>>> Missing file <<<<<
+sfcWind_19920901.nc : there is no dimension called ensemble inside this file
+sfcWind_19921001.nc : there is no dimension called ensemble inside this file
+sfcWind_19921101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19921101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19921101.nc : there is no dimension called ensemble inside this file
+sfcWind_19921101.nc : time dimension has 214 values instead of 216
+sfcWind_19921201.nc : there is no dimension called ensemble inside this file
+sfcWind_19930101.nc : there is no dimension called ensemble inside this file
+sfcWind_19930201.nc : there is no dimension called latitude inside this file
+sfcWind_19930201.nc : there is no dimension called longitude inside this file
+sfcWind_19930201.nc : there is no dimension called ensemble inside this file
+sfcWind_19930201.nc : time dimension has 10812 values instead of 216
+sfcWind_19930301.nc : there is no dimension called ensemble inside this file
+sfcWind_19930401.nc : there is no dimension called ensemble inside this file
+sfcWind_19930501.nc : >>>>> Missing file <<<<<
+sfcWind_19930601.nc : there is no dimension called latitude inside this file
+sfcWind_19930601.nc : there is no dimension called longitude inside this file
+sfcWind_19930601.nc : there is no dimension called ensemble inside this file
+sfcWind_19930601.nc : there is no dimension called time inside this file
+sfcWind_19930701.nc : there is no dimension called ensemble inside this file
+sfcWind_19930801.nc : >>>>> Missing file <<<<<
+sfcWind_19930901.nc : there is no dimension called ensemble inside this file
+sfcWind_19931001.nc : there is no dimension called ensemble inside this file
+sfcWind_19931101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19931101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19931101.nc : there is no dimension called ensemble inside this file
+sfcWind_19931101.nc : time dimension has 214 values instead of 216
+sfcWind_19931201.nc : there is no dimension called ensemble inside this file
+sfcWind_19931201.nc : time dimension has 29 values instead of 216
+sfcWind_19940101.nc : there is no dimension called ensemble inside this file
+sfcWind_19940201.nc : there is no dimension called latitude inside this file
+sfcWind_19940201.nc : there is no dimension called longitude inside this file
+sfcWind_19940201.nc : there is no dimension called ensemble inside this file
+sfcWind_19940201.nc : time dimension has 10812 values instead of 216
+sfcWind_19940301.nc : there is no dimension called ensemble inside this file
+sfcWind_19940301.nc : time dimension has 101 values instead of 216
+sfcWind_19940401.nc : there is no dimension called ensemble inside this file
+sfcWind_19940501.nc : >>>>> Missing file <<<<<
+sfcWind_19940601.nc : there is no dimension called ensemble inside this file
+sfcWind_19940701.nc : there is no dimension called ensemble inside this file
+sfcWind_19940801.nc : >>>>> Missing file <<<<<
+sfcWind_19940901.nc : there is no dimension called ensemble inside this file
+sfcWind_19941001.nc : there is no dimension called ensemble inside this file
+sfcWind_19941101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19941101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19941101.nc : there is no dimension called ensemble inside this file
+sfcWind_19941101.nc : time dimension has 214 values instead of 216
+sfcWind_19941201.nc : there is no dimension called ensemble inside this file
+sfcWind_19950101.nc : there is no dimension called ensemble inside this file
+sfcWind_19950101.nc : time dimension has 75 values instead of 216
+sfcWind_19950201.nc : there is no dimension called latitude inside this file
+sfcWind_19950201.nc : there is no dimension called longitude inside this file
+sfcWind_19950201.nc : there is no dimension called ensemble inside this file
+sfcWind_19950201.nc : time dimension has 10812 values instead of 216
+sfcWind_19950301.nc : there is no dimension called ensemble inside this file
+sfcWind_19950401.nc : there is no dimension called ensemble inside this file
+sfcWind_19950401.nc : time dimension has 187 values instead of 216
+sfcWind_19950501.nc : >>>>> Missing file <<<<<
+sfcWind_19950601.nc : there is no dimension called ensemble inside this file
+sfcWind_19950701.nc : there is no dimension called ensemble inside this file
+sfcWind_19950801.nc : >>>>> Missing file <<<<<
+sfcWind_19950901.nc : there is no dimension called ensemble inside this file
+sfcWind_19951001.nc : there is no dimension called ensemble inside this file
+sfcWind_19951101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19951101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19951101.nc : there is no dimension called ensemble inside this file
+sfcWind_19951101.nc : time dimension has 214 values instead of 216
+sfcWind_19951201.nc : there is no dimension called ensemble inside this file
+sfcWind_19960101.nc : there is no dimension called ensemble inside this file
+sfcWind_19960201.nc : there is no dimension called latitude inside this file
+sfcWind_19960201.nc : there is no dimension called longitude inside this file
+sfcWind_19960201.nc : there is no dimension called ensemble inside this file
+sfcWind_19960201.nc : time dimension has 10863 values instead of 216
+sfcWind_19960301.nc : there is no dimension called ensemble inside this file
+sfcWind_19960401.nc : there is no dimension called ensemble inside this file
+sfcWind_19960501.nc : >>>>> Missing file <<<<<
+sfcWind_19960601.nc : there is no dimension called ensemble inside this file
+sfcWind_19960701.nc : there is no dimension called ensemble inside this file
+sfcWind_19960801.nc : >>>>> Missing file <<<<<
+sfcWind_19960901.nc : there is no dimension called ensemble inside this file
+sfcWind_19961001.nc : there is no dimension called ensemble inside this file
+sfcWind_19961101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19961101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19961101.nc : there is no dimension called ensemble inside this file
+sfcWind_19961101.nc : time dimension has 214 values instead of 216
+sfcWind_19961201.nc : there is no dimension called ensemble inside this file
+sfcWind_19970101.nc : there is no dimension called ensemble inside this file
+sfcWind_19970201.nc : there is no dimension called latitude inside this file
+sfcWind_19970201.nc : there is no dimension called longitude inside this file
+sfcWind_19970201.nc : there is no dimension called ensemble inside this file
+sfcWind_19970201.nc : time dimension has 10812 values instead of 216
+sfcWind_19970301.nc : there is no dimension called ensemble inside this file
+sfcWind_19970401.nc : there is no dimension called ensemble inside this file
+sfcWind_19970501.nc : >>>>> Missing file <<<<<
+sfcWind_19970601.nc : there is no dimension called ensemble inside this file
+sfcWind_19970701.nc : there is no dimension called ensemble inside this file
+sfcWind_19970801.nc : >>>>> Missing file <<<<<
+sfcWind_19970901.nc : there is no dimension called ensemble inside this file
+sfcWind_19971001.nc : there is no dimension called ensemble inside this file
+sfcWind_19971101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19971101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19971101.nc : there is no dimension called ensemble inside this file
+sfcWind_19971101.nc : time dimension has 214 values instead of 216
+sfcWind_19971201.nc : there is no dimension called ensemble inside this file
+sfcWind_19980101.nc : there is no dimension called ensemble inside this file
+sfcWind_19980201.nc : there is no dimension called latitude inside this file
+sfcWind_19980201.nc : there is no dimension called longitude inside this file
+sfcWind_19980201.nc : there is no dimension called ensemble inside this file
+sfcWind_19980201.nc : time dimension has 10812 values instead of 216
+sfcWind_19980301.nc : there is no dimension called ensemble inside this file
+sfcWind_19980401.nc : there is no dimension called ensemble inside this file
+sfcWind_19980501.nc : >>>>> Missing file <<<<<
+sfcWind_19980601.nc : there is no dimension called ensemble inside this file
+sfcWind_19980701.nc : there is no dimension called ensemble inside this file
+sfcWind_19980801.nc : >>>>> Missing file <<<<<
+sfcWind_19980901.nc : there is no dimension called ensemble inside this file
+sfcWind_19981001.nc : there is no dimension called ensemble inside this file
+sfcWind_19981101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19981101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19981101.nc : there is no dimension called ensemble inside this file
+sfcWind_19981101.nc : time dimension has 214 values instead of 216
+sfcWind_19981201.nc : there is no dimension called ensemble inside this file
+sfcWind_19990101.nc : there is no dimension called ensemble inside this file
+sfcWind_19990201.nc : there is no dimension called latitude inside this file
+sfcWind_19990201.nc : there is no dimension called longitude inside this file
+sfcWind_19990201.nc : there is no dimension called ensemble inside this file
+sfcWind_19990201.nc : time dimension has 10812 values instead of 216
+sfcWind_19990301.nc : there is no dimension called ensemble inside this file
+sfcWind_19990401.nc : there is no dimension called ensemble inside this file
+sfcWind_19990501.nc : >>>>> Missing file <<<<<
+sfcWind_19990601.nc : there is no dimension called ensemble inside this file
+sfcWind_19990701.nc : there is no dimension called ensemble inside this file
+sfcWind_19990801.nc : >>>>> Missing file <<<<<
+sfcWind_19990901.nc : there is no dimension called ensemble inside this file
+sfcWind_19991001.nc : there is no dimension called ensemble inside this file
+sfcWind_19991101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19991101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19991101.nc : there is no dimension called ensemble inside this file
+sfcWind_19991101.nc : there is no dimension called time inside this file
+sfcWind_19991201.nc : there is no dimension called ensemble inside this file
+sfcWind_20000101.nc : there is no dimension called ensemble inside this file
+sfcWind_20000201.nc : there is no dimension called ensemble inside this file
+sfcWind_20000201.nc : time dimension has 10863 values instead of 216
+sfcWind_20000301.nc : there is no dimension called ensemble inside this file
+sfcWind_20000301.nc : time dimension has 55 values instead of 216
+sfcWind_20000401.nc : there is no dimension called ensemble inside this file
+sfcWind_20000401.nc : time dimension has 54 values instead of 216
+sfcWind_20000501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20000501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20000501.nc : there is no dimension called ensemble inside this file
+sfcWind_20000501.nc : there is no dimension called time inside this file
+sfcWind_20000601.nc : there is no dimension called ensemble inside this file
+sfcWind_20000701.nc : there is no dimension called ensemble inside this file
+sfcWind_20000801.nc : there is no dimension called latitude inside this file
+sfcWind_20000801.nc : there is no dimension called longitude inside this file
+sfcWind_20000801.nc : there is no dimension called ensemble inside this file
+sfcWind_20000801.nc : time dimension has 212 values instead of 216
+sfcWind_20000901.nc : there is no dimension called ensemble inside this file
+sfcWind_20001001.nc : there is no dimension called ensemble inside this file
+sfcWind_20001101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20001101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20001101.nc : there is no dimension called ensemble inside this file
+sfcWind_20001101.nc : there is no dimension called time inside this file
+sfcWind_20001201.nc : there is no dimension called ensemble inside this file
+sfcWind_20010101.nc : there is no dimension called ensemble inside this file
+sfcWind_20010201.nc : there is no dimension called latitude inside this file
+sfcWind_20010201.nc : there is no dimension called longitude inside this file
+sfcWind_20010201.nc : there is no dimension called ensemble inside this file
+sfcWind_20010201.nc : time dimension has 10812 values instead of 216
+sfcWind_20010301.nc : there is no dimension called ensemble inside this file
+sfcWind_20010401.nc : there is no dimension called ensemble inside this file
+sfcWind_20010501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20010501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20010501.nc : there is no dimension called ensemble inside this file
+sfcWind_20010501.nc : there is no dimension called time inside this file
+sfcWind_20010601.nc : there is no dimension called ensemble inside this file
+sfcWind_20010701.nc : there is no dimension called ensemble inside this file
+sfcWind_20010801.nc : there is no dimension called latitude inside this file
+sfcWind_20010801.nc : there is no dimension called longitude inside this file
+sfcWind_20010801.nc : there is no dimension called ensemble inside this file
+sfcWind_20010801.nc : time dimension has 212 values instead of 216
+sfcWind_20010901.nc : there is no dimension called ensemble inside this file
+sfcWind_20011001.nc : there is no dimension called ensemble inside this file
+sfcWind_20011101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20011101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20011101.nc : there is no dimension called ensemble inside this file
+sfcWind_20011101.nc : there is no dimension called time inside this file
+sfcWind_20011201.nc : there is no dimension called ensemble inside this file
+sfcWind_20020101.nc : there is no dimension called ensemble inside this file
+sfcWind_20020201.nc : there is no dimension called latitude inside this file
+sfcWind_20020201.nc : there is no dimension called longitude inside this file
+sfcWind_20020201.nc : there is no dimension called ensemble inside this file
+sfcWind_20020201.nc : time dimension has 10812 values instead of 216
+sfcWind_20020301.nc : there is no dimension called ensemble inside this file
+sfcWind_20020401.nc : there is no dimension called ensemble inside this file
+sfcWind_20020501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20020501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20020501.nc : there is no dimension called ensemble inside this file
+sfcWind_20020501.nc : there is no dimension called time inside this file
+sfcWind_20020601.nc : there is no dimension called ensemble inside this file
+sfcWind_20020701.nc : there is no dimension called ensemble inside this file
+sfcWind_20020801.nc : there is no dimension called latitude inside this file
+sfcWind_20020801.nc : there is no dimension called longitude inside this file
+sfcWind_20020801.nc : there is no dimension called ensemble inside this file
+sfcWind_20020801.nc : time dimension has 212 values instead of 216
+sfcWind_20020901.nc : there is no dimension called ensemble inside this file
+sfcWind_20021001.nc : there is no dimension called ensemble inside this file
+sfcWind_20021101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20021101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20021101.nc : there is no dimension called ensemble inside this file
+sfcWind_20021101.nc : there is no dimension called time inside this file
+sfcWind_20021201.nc : there is no dimension called ensemble inside this file
+sfcWind_20030101.nc : there is no dimension called ensemble inside this file
+sfcWind_20030201.nc : there is no dimension called latitude inside this file
+sfcWind_20030201.nc : there is no dimension called longitude inside this file
+sfcWind_20030201.nc : there is no dimension called ensemble inside this file
+sfcWind_20030201.nc : time dimension has 10812 values instead of 216
+sfcWind_20030301.nc : there is no dimension called ensemble inside this file
+sfcWind_20030401.nc : there is no dimension called ensemble inside this file
+sfcWind_20030501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20030501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20030501.nc : there is no dimension called ensemble inside this file
+sfcWind_20030501.nc : there is no dimension called time inside this file
+sfcWind_20030601.nc : there is no dimension called ensemble inside this file
+sfcWind_20030701.nc : there is no dimension called ensemble inside this file
+sfcWind_20030801.nc : there is no dimension called latitude inside this file
+sfcWind_20030801.nc : there is no dimension called longitude inside this file
+sfcWind_20030801.nc : there is no dimension called ensemble inside this file
+sfcWind_20030801.nc : time dimension has 213 values instead of 216
+sfcWind_20030901.nc : there is no dimension called ensemble inside this file
+sfcWind_20031001.nc : there is no dimension called ensemble inside this file
+sfcWind_20031101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20031101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20031101.nc : there is no dimension called ensemble inside this file
+sfcWind_20031101.nc : there is no dimension called time inside this file
+sfcWind_20031201.nc : there is no dimension called ensemble inside this file
+sfcWind_20040101.nc : there is no dimension called ensemble inside this file
+sfcWind_20040201.nc : there is no dimension called latitude inside this file
+sfcWind_20040201.nc : there is no dimension called longitude inside this file
+sfcWind_20040201.nc : there is no dimension called ensemble inside this file
+sfcWind_20040201.nc : time dimension has 10863 values instead of 216
+sfcWind_20040301.nc : there is no dimension called ensemble inside this file
+sfcWind_20040401.nc : there is no dimension called ensemble inside this file
+sfcWind_20040501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20040501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20040501.nc : there is no dimension called ensemble inside this file
+sfcWind_20040501.nc : there is no dimension called time inside this file
+sfcWind_20040601.nc : there is no dimension called ensemble inside this file
+sfcWind_20040701.nc : there is no dimension called ensemble inside this file
+sfcWind_20040801.nc : there is no dimension called latitude inside this file
+sfcWind_20040801.nc : there is no dimension called longitude inside this file
+sfcWind_20040801.nc : there is no dimension called ensemble inside this file
+sfcWind_20040801.nc : time dimension has 212 values instead of 216
+sfcWind_20040901.nc : there is no dimension called ensemble inside this file
+sfcWind_20041001.nc : there is no dimension called ensemble inside this file
+sfcWind_20041101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20041101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20041101.nc : there is no dimension called ensemble inside this file
+sfcWind_20041101.nc : there is no dimension called time inside this file
+sfcWind_20041201.nc : there is no dimension called ensemble inside this file
+sfcWind_20050101.nc : there is no dimension called ensemble inside this file
+sfcWind_20050201.nc : there is no dimension called latitude inside this file
+sfcWind_20050201.nc : there is no dimension called longitude inside this file
+sfcWind_20050201.nc : there is no dimension called ensemble inside this file
+sfcWind_20050201.nc : time dimension has 10812 values instead of 216
+sfcWind_20050301.nc : there is no dimension called ensemble inside this file
+sfcWind_20050401.nc : there is no dimension called ensemble inside this file
+sfcWind_20050501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20050501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20050501.nc : there is no dimension called ensemble inside this file
+sfcWind_20050501.nc : there is no dimension called time inside this file
+sfcWind_20050601.nc : there is no dimension called ensemble inside this file
+sfcWind_20050701.nc : there is no dimension called ensemble inside this file
+sfcWind_20050801.nc : there is no dimension called latitude inside this file
+sfcWind_20050801.nc : there is no dimension called longitude inside this file
+sfcWind_20050801.nc : there is no dimension called ensemble inside this file
+sfcWind_20050801.nc : time dimension has 212 values instead of 216
+sfcWind_20050901.nc : there is no dimension called ensemble inside this file
+sfcWind_20051001.nc : there is no dimension called ensemble inside this file
+sfcWind_20051101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20051101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20051101.nc : there is no dimension called ensemble inside this file
+sfcWind_20051101.nc : there is no dimension called time inside this file
+sfcWind_20051201.nc : there is no dimension called ensemble inside this file
+sfcWind_20060101.nc : there is no dimension called ensemble inside this file
+sfcWind_20060201.nc : there is no dimension called latitude inside this file
+sfcWind_20060201.nc : there is no dimension called longitude inside this file
+sfcWind_20060201.nc : there is no dimension called ensemble inside this file
+sfcWind_20060201.nc : time dimension has 10812 values instead of 216
+sfcWind_20060301.nc : there is no dimension called ensemble inside this file
+sfcWind_20060401.nc : there is no dimension called ensemble inside this file
+sfcWind_20060501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20060501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20060501.nc : there is no dimension called ensemble inside this file
+sfcWind_20060501.nc : there is no dimension called time inside this file
+sfcWind_20060601.nc : there is no dimension called ensemble inside this file
+sfcWind_20060701.nc : there is no dimension called ensemble inside this file
+sfcWind_20060801.nc : there is no dimension called latitude inside this file
+sfcWind_20060801.nc : there is no dimension called longitude inside this file
+sfcWind_20060801.nc : there is no dimension called ensemble inside this file
+sfcWind_20060801.nc : time dimension has 212 values instead of 216
+sfcWind_20060901.nc : there is no dimension called ensemble inside this file
+sfcWind_20061001.nc : there is no dimension called ensemble inside this file
+sfcWind_20061101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20061101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20061101.nc : there is no dimension called ensemble inside this file
+sfcWind_20061101.nc : there is no dimension called time inside this file
+sfcWind_20061201.nc : there is no dimension called ensemble inside this file
+sfcWind_20070101.nc : there is no dimension called ensemble inside this file
+sfcWind_20070201.nc : there is no dimension called latitude inside this file
+sfcWind_20070201.nc : there is no dimension called longitude inside this file
+sfcWind_20070201.nc : there is no dimension called ensemble inside this file
+sfcWind_20070201.nc : time dimension has 10812 values instead of 216
+sfcWind_20070301.nc : there is no dimension called ensemble inside this file
+sfcWind_20070401.nc : there is no dimension called ensemble inside this file
+sfcWind_20070501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20070501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20070501.nc : there is no dimension called ensemble inside this file
+sfcWind_20070501.nc : there is no dimension called time inside this file
+sfcWind_20070601.nc : there is no dimension called ensemble inside this file
+sfcWind_20070701.nc : there is no dimension called ensemble inside this file
+sfcWind_20070801.nc : there is no dimension called latitude inside this file
+sfcWind_20070801.nc : there is no dimension called longitude inside this file
+sfcWind_20070801.nc : there is no dimension called ensemble inside this file
+sfcWind_20070801.nc : time dimension has 213 values instead of 216
+sfcWind_20070901.nc : there is no dimension called ensemble inside this file
+sfcWind_20071001.nc : there is no dimension called ensemble inside this file
+sfcWind_20071101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20071101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20071101.nc : there is no dimension called ensemble inside this file
+sfcWind_20071101.nc : there is no dimension called time inside this file
+sfcWind_20071201.nc : there is no dimension called ensemble inside this file
+sfcWind_20080101.nc : there is no dimension called ensemble inside this file
+sfcWind_20080201.nc : there is no dimension called latitude inside this file
+sfcWind_20080201.nc : there is no dimension called longitude inside this file
+sfcWind_20080201.nc : there is no dimension called ensemble inside this file
+sfcWind_20080201.nc : time dimension has 10863 values instead of 216
+sfcWind_20080301.nc : there is no dimension called ensemble inside this file
+sfcWind_20080301.nc : time dimension has 2 values instead of 216
+sfcWind_20080401.nc : there is no dimension called ensemble inside this file
+sfcWind_20080501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20080501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20080501.nc : there is no dimension called ensemble inside this file
+sfcWind_20080501.nc : there is no dimension called time inside this file
+sfcWind_20080601.nc : there is no dimension called ensemble inside this file
+sfcWind_20080701.nc : there is no dimension called ensemble inside this file
+sfcWind_20080801.nc : there is no dimension called latitude inside this file
+sfcWind_20080801.nc : there is no dimension called longitude inside this file
+sfcWind_20080801.nc : there is no dimension called ensemble inside this file
+sfcWind_20080801.nc : time dimension has 212 values instead of 216
+sfcWind_20080901.nc : there is no dimension called ensemble inside this file
+sfcWind_20081001.nc : there is no dimension called ensemble inside this file
+sfcWind_20081101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20081101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20081101.nc : there is no dimension called ensemble inside this file
+sfcWind_20081101.nc : there is no dimension called time inside this file
+sfcWind_20081201.nc : there is no dimension called ensemble inside this file
+sfcWind_20090101.nc : there is no dimension called ensemble inside this file
+sfcWind_20090201.nc : there is no dimension called latitude inside this file
+sfcWind_20090201.nc : there is no dimension called longitude inside this file
+sfcWind_20090201.nc : there is no dimension called ensemble inside this file
+sfcWind_20090201.nc : time dimension has 10812 values instead of 216
+sfcWind_20090301.nc : there is no dimension called ensemble inside this file
+sfcWind_20090401.nc : there is no dimension called ensemble inside this file
+sfcWind_20090501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20090501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20090501.nc : there is no dimension called ensemble inside this file
+sfcWind_20090501.nc : there is no dimension called time inside this file
+sfcWind_20090601.nc : there is no dimension called ensemble inside this file
+sfcWind_20090701.nc : there is no dimension called ensemble inside this file
+sfcWind_20090801.nc : there is no dimension called latitude inside this file
+sfcWind_20090801.nc : there is no dimension called longitude inside this file
+sfcWind_20090801.nc : there is no dimension called ensemble inside this file
+sfcWind_20090801.nc : time dimension has 212 values instead of 216
+sfcWind_20090901.nc : there is no dimension called ensemble inside this file
+sfcWind_20091001.nc : there is no dimension called ensemble inside this file
+sfcWind_20091101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20091101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20091101.nc : there is no dimension called ensemble inside this file
+sfcWind_20091101.nc : there is no dimension called time inside this file
+sfcWind_20091201.nc : there is no dimension called ensemble inside this file
+sfcWind_20100101.nc : there is no dimension called ensemble inside this file
+sfcWind_20100201.nc : there is no dimension called latitude inside this file
+sfcWind_20100201.nc : there is no dimension called longitude inside this file
+sfcWind_20100201.nc : there is no dimension called ensemble inside this file
+sfcWind_20100201.nc : time dimension has 10812 values instead of 216
+sfcWind_20100301.nc : there is no dimension called ensemble inside this file
+sfcWind_20100401.nc : there is no dimension called ensemble inside this file
+sfcWind_20100501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20100501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20100501.nc : there is no dimension called ensemble inside this file
+sfcWind_20100501.nc : there is no dimension called time inside this file
+sfcWind_20100601.nc : there is no dimension called ensemble inside this file
+sfcWind_20100701.nc : there is no dimension called ensemble inside this file
+sfcWind_20100801.nc : there is no dimension called latitude inside this file
+sfcWind_20100801.nc : there is no dimension called longitude inside this file
+sfcWind_20100801.nc : there is no dimension called ensemble inside this file
+sfcWind_20100801.nc : time dimension has 212 values instead of 216
+sfcWind_20100901.nc : there is no dimension called ensemble inside this file
+sfcWind_20100901.nc : time dimension has 110 values instead of 216
+sfcWind_20101001.nc : there is no dimension called ensemble inside this file
+sfcWind_20101101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20101101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20101101.nc : there is no dimension called ensemble inside this file
+sfcWind_20101101.nc : there is no dimension called time inside this file
+sfcWind_20101201.nc : there is no dimension called ensemble inside this file
+sfcWind_20101201.nc : time dimension has 160 values instead of 216
+sfcWind_20110101.nc : there is no dimension called ensemble inside this file
+sfcWind_20110201.nc : there is no dimension called latitude inside this file
+sfcWind_20110201.nc : there is no dimension called longitude inside this file
+sfcWind_20110201.nc : there is no dimension called ensemble inside this file
+sfcWind_20110201.nc : time dimension has 10812 values instead of 216
+sfcWind_20110301.nc : there is no dimension called ensemble inside this file
+sfcWind_20110401.nc : there is no dimension called ensemble inside this file
+sfcWind_20110501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20110501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20110501.nc : there is no dimension called ensemble inside this file
+sfcWind_20110501.nc : there is no dimension called time inside this file
+sfcWind_20110601.nc : there is no dimension called ensemble inside this file
+sfcWind_20110701.nc : there is no dimension called ensemble inside this file
+sfcWind_20110801.nc : there is no dimension called latitude inside this file
+sfcWind_20110801.nc : there is no dimension called longitude inside this file
+sfcWind_20110801.nc : there is no dimension called ensemble inside this file
+sfcWind_20110801.nc : time dimension has 213 values instead of 216
+sfcWind_20110901.nc : there is no dimension called ensemble inside this file
+sfcWind_20111001.nc : there is no dimension called ensemble inside this file
+sfcWind_20111101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20111101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20111101.nc : there is no dimension called ensemble inside this file
+sfcWind_20111101.nc : there is no dimension called time inside this file
+sfcWind_20111201.nc : there is no dimension called ensemble inside this file
+sfcWind_20120101.nc : there is no dimension called ensemble inside this file
+sfcWind_20120201.nc : there is no dimension called latitude inside this file
+sfcWind_20120201.nc : there is no dimension called longitude inside this file
+sfcWind_20120201.nc : there is no dimension called ensemble inside this file
+sfcWind_20120201.nc : time dimension has 10863 values instead of 216
+sfcWind_20120301.nc : there is no dimension called ensemble inside this file
+sfcWind_20120401.nc : there is no dimension called ensemble inside this file
+sfcWind_20120401.nc : time dimension has 180 values instead of 216
+sfcWind_20120501.nc : latitude dimension has 256 values instead of 181
+sfcWind_20120501.nc : longitude dimension has 512 values instead of 360
+sfcWind_20120501.nc : there is no dimension called ensemble inside this file
+sfcWind_20120501.nc : there is no dimension called time inside this file
+sfcWind_20120601.nc : there is no dimension called ensemble inside this file
+sfcWind_20120601.nc : time dimension has 97 values instead of 216
+sfcWind_20120701.nc : there is no dimension called ensemble inside this file
+sfcWind_20120801.nc : there is no dimension called latitude inside this file
+sfcWind_20120801.nc : there is no dimension called longitude inside this file
+sfcWind_20120801.nc : there is no dimension called ensemble inside this file
+sfcWind_20120801.nc : time dimension has 212 values instead of 216
+sfcWind_20120901.nc : there is no dimension called ensemble inside this file
+sfcWind_20121001.nc : >>>>> Missing file <<<<<
+sfcWind_20121101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20121101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20121101.nc : there is no dimension called ensemble inside this file
+sfcWind_20121101.nc : there is no dimension called time inside this file
+sfcWind_20121201.nc : >>>>> Missing file <<<<<
+sfcWind_20130101.nc : >>>>> Missing file <<<<<
+sfcWind_20130201.nc : there is no dimension called latitude inside this file
+sfcWind_20130201.nc : there is no dimension called longitude inside this file
+sfcWind_20130201.nc : there is no dimension called ensemble inside this file
+sfcWind_20130201.nc : time dimension has 11016 values instead of 216
+sfcWind_20130301.nc : >>>>> Missing file <<<<<
+sfcWind_20130401.nc : >>>>> Missing file <<<<<
+sfcWind_20130501.nc : there is no dimension called latitude inside this file
+sfcWind_20130501.nc : there is no dimension called longitude inside this file
+sfcWind_20130501.nc : there is no dimension called ensemble inside this file
+sfcWind_20130501.nc : time dimension has 11016 values instead of 216
+sfcWind_20130601.nc : >>>>> Missing file <<<<<
+sfcWind_20130701.nc : >>>>> Missing file <<<<<
+sfcWind_20130801.nc : there is no dimension called latitude inside this file
+sfcWind_20130801.nc : there is no dimension called longitude inside this file
+sfcWind_20130801.nc : there is no dimension called ensemble inside this file
+sfcWind_20130801.nc : time dimension has 212 values instead of 216
+sfcWind_20130901.nc : there is no dimension called ensemble inside this file
+sfcWind_20131001.nc : there is no dimension called ensemble inside this file
+sfcWind_20131101.nc : there is no dimension called latitude inside this file
+sfcWind_20131101.nc : there is no dimension called longitude inside this file
+sfcWind_20131101.nc : there is no dimension called ensemble inside this file
+sfcWind_20131101.nc : time dimension has 212 values instead of 216
+sfcWind_20131201.nc : there is no dimension called ensemble inside this file
+sfcWind_20140101.nc : >>>>> Missing file <<<<<
+sfcWind_20140201.nc : there is no dimension called latitude inside this file
+sfcWind_20140201.nc : there is no dimension called longitude inside this file
+sfcWind_20140201.nc : there is no dimension called ensemble inside this file
+sfcWind_20140201.nc : time dimension has 11016 values instead of 216
+sfcWind_20140301.nc : >>>>> Missing file <<<<<
+sfcWind_20140401.nc : >>>>> Missing file <<<<<
+sfcWind_20140501.nc : there is no dimension called latitude inside this file
+sfcWind_20140501.nc : there is no dimension called longitude inside this file
+sfcWind_20140501.nc : there is no dimension called ensemble inside this file
+sfcWind_20140501.nc : time dimension has 214 values instead of 216
+sfcWind_20140601.nc : >>>>> Missing file <<<<<
+sfcWind_20140701.nc : >>>>> Missing file <<<<<
+sfcWind_20140801.nc : there is no dimension called ensemble inside this file
+sfcWind_20140901.nc : there is no dimension called ensemble inside this file
+sfcWind_20141001.nc : there is no dimension called ensemble inside this file
+sfcWind_20141101.nc : there is no dimension called latitude inside this file
+sfcWind_20141101.nc : there is no dimension called longitude inside this file
+sfcWind_20141101.nc : there is no dimension called ensemble inside this file
+sfcWind_20141101.nc : time dimension has 212 values instead of 216
+sfcWind_20141201.nc : there is no dimension called ensemble inside this file
+sfcWind_20150101.nc : >>>>> Missing file <<<<<
+sfcWind_20150201.nc : >>>>> Missing file <<<<<
+sfcWind_20150301.nc : >>>>> Missing file <<<<<
+sfcWind_20150401.nc : >>>>> Missing file <<<<<
+sfcWind_20150501.nc : >>>>> Missing file <<<<<
+sfcWind_20150601.nc : there is no dimension called ensemble inside this file
+sfcWind_20150701.nc : there is no dimension called ensemble inside this file
+sfcWind_20150801.nc : there is no dimension called ensemble inside this file
+sfcWind_20150801.nc : time dimension has 197 values instead of 216
+sfcWind_20150901.nc : >>>>> Missing file <<<<<
+sfcWind_20151001.nc : >>>>> Missing file <<<<<
+sfcWind_20151101.nc : latitude dimension has 256 values instead of 181
+sfcWind_20151101.nc : longitude dimension has 512 values instead of 360
+sfcWind_20151101.nc : there is no dimension called ensemble inside this file
+sfcWind_20151101.nc : time dimension has 236 values instead of 216
+sfcWind_20151201.nc : >>>>> Missing file <<<<<
diff --git a/old/backup/bash/check_ECMWFS4_sfcWind_daily.txt b/old/backup/bash/check_ECMWFS4_sfcWind_daily.txt
new file mode 100644
index 0000000000000000000000000000000000000000..18c7f82bd8eb4dfa165c22a5ea9cf16b26b06625
--- /dev/null
+++ b/old/backup/bash/check_ECMWFS4_sfcWind_daily.txt
@@ -0,0 +1,731 @@
+Checked variable: sfcWind
+Path: /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/
+Extension: .nc
+Checked period: 1981-2015
+>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<<
+Latitude denomination: latitude
+Number of latitude values: 181
+Longitude denomination: longitude
+Number of longitude values: 360
+Ensemble denomination: ensemble
+Number of ensemble values: 15
+Alternative number of ensemble values: 51
+Lead-time denomination: time
+Number of lead-times: 216
+ >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<<
+sfcWind_19810101.nc : ok
+sfcWind_19810201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19810201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19810201.nc : time dimension has 626 values instead of 216
+sfcWind_19810301.nc : ok
+sfcWind_19810401.nc : ok
+sfcWind_19810501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19810501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19810501.nc : time dimension has 214 values instead of 216
+sfcWind_19810601.nc : ok
+sfcWind_19810701.nc : ok
+sfcWind_19810801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19810801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19810801.nc : time dimension has 212 values instead of 216
+sfcWind_19810901.nc : ok
+sfcWind_19811001.nc : ok
+sfcWind_19811101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19811101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19811101.nc : time dimension has 214 values instead of 216
+sfcWind_19811201.nc : ok
+sfcWind_19820101.nc : ok
+sfcWind_19820201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19820201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19820201.nc : time dimension has 848 values instead of 216
+sfcWind_19820301.nc : ok
+sfcWind_19820401.nc : ok
+sfcWind_19820501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19820501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19820501.nc : time dimension has 214 values instead of 216
+sfcWind_19820601.nc : ok
+sfcWind_19820701.nc : time dimension has 31 values instead of 216
+sfcWind_19820801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19820801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19820801.nc : time dimension has 212 values instead of 216
+sfcWind_19820901.nc : ok
+sfcWind_19821001.nc : ok
+sfcWind_19821101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19821101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19821101.nc : time dimension has 214 values instead of 216
+sfcWind_19821201.nc : ok
+sfcWind_19830101.nc : ok
+sfcWind_19830201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830201.nc : time dimension has 848 values instead of 216
+sfcWind_19830301.nc : ok
+sfcWind_19830401.nc : ok
+sfcWind_19830501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830501.nc : time dimension has 214 values instead of 216
+sfcWind_19830601.nc : ok
+sfcWind_19830701.nc : ok
+sfcWind_19830801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19830801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19830801.nc : time dimension has 213 values instead of 216
+sfcWind_19830901.nc : ok
+sfcWind_19831001.nc : ok
+sfcWind_19831101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19831101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19831101.nc : time dimension has 734 values instead of 216
+sfcWind_19831201.nc : ok
+sfcWind_19840101.nc : time dimension has 9 values instead of 216
+sfcWind_19840201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840201.nc : time dimension has 852 values instead of 216
+sfcWind_19840301.nc : ok
+sfcWind_19840401.nc : ok
+sfcWind_19840501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840501.nc : time dimension has 214 values instead of 216
+sfcWind_19840601.nc : ok
+sfcWind_19840701.nc : ok
+sfcWind_19840801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19840801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19840801.nc : time dimension has 212 values instead of 216
+sfcWind_19840901.nc : ok
+sfcWind_19841001.nc : ok
+sfcWind_19841101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19841101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19841101.nc : time dimension has 214 values instead of 216
+sfcWind_19841201.nc : ok
+sfcWind_19850101.nc : ok
+sfcWind_19850201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19850201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19850201.nc : there is no dimension called ensemble inside this file
+sfcWind_19850201.nc : time dimension has 14625 values instead of 216
+sfcWind_19850301.nc : ok
+sfcWind_19850401.nc : ok
+sfcWind_19850501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19850501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19850501.nc : time dimension has 214 values instead of 216
+sfcWind_19850601.nc : there is no dimension called latitude inside this file
+sfcWind_19850601.nc : there is no dimension called longitude inside this file
+sfcWind_19850601.nc : there is no dimension called ensemble inside this file
+sfcWind_19850601.nc : there is no dimension called time inside this file
+sfcWind_19850701.nc : ok
+sfcWind_19850801.nc : there is no dimension called latitude inside this file
+sfcWind_19850801.nc : there is no dimension called longitude inside this file
+sfcWind_19850801.nc : there is no dimension called ensemble inside this file
+sfcWind_19850801.nc : time dimension has 1 values instead of 216
+sfcWind_19850901.nc : time dimension has 226 values instead of 216
+sfcWind_19851001.nc : ok
+sfcWind_19851101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19851101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19851101.nc : time dimension has 214 values instead of 216
+sfcWind_19851201.nc : ok
+sfcWind_19860101.nc : time dimension has 23 values instead of 216
+sfcWind_19860201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19860201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19860201.nc : time dimension has 848 values instead of 216
+sfcWind_19860301.nc : time dimension has 285 values instead of 216
+sfcWind_19860401.nc : ok
+sfcWind_19860501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19860501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19860501.nc : time dimension has 214 values instead of 216
+sfcWind_19860601.nc : ok
+sfcWind_19860701.nc : ok
+sfcWind_19860801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19860801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19860801.nc : time dimension has 212 values instead of 216
+sfcWind_19860901.nc : ok
+sfcWind_19861001.nc : time dimension has 94 values instead of 216
+sfcWind_19861101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19861101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19861101.nc : time dimension has 214 values instead of 216
+sfcWind_19861201.nc : ok
+sfcWind_19870101.nc : ok
+sfcWind_19870201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19870201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19870201.nc : there is no dimension called ensemble inside this file
+sfcWind_19870201.nc : time dimension has 10812 values instead of 216
+sfcWind_19870301.nc : ok
+sfcWind_19870401.nc : ok
+sfcWind_19870501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19870501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19870501.nc : time dimension has 214 values instead of 216
+sfcWind_19870601.nc : ok
+sfcWind_19870701.nc : ok
+sfcWind_19870801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19870801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19870801.nc : time dimension has 213 values instead of 216
+sfcWind_19870901.nc : ok
+sfcWind_19871001.nc : ok
+sfcWind_19871101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19871101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19871101.nc : time dimension has 214 values instead of 216
+sfcWind_19871201.nc : time dimension has 46 values instead of 216
+sfcWind_19880101.nc : ok
+sfcWind_19880201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19880201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19880201.nc : time dimension has 852 values instead of 216
+sfcWind_19880301.nc : ok
+sfcWind_19880401.nc : ok
+sfcWind_19880501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19880501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19880501.nc : time dimension has 214 values instead of 216
+sfcWind_19880601.nc : ok
+sfcWind_19880701.nc : ok
+sfcWind_19880801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19880801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19880801.nc : time dimension has 212 values instead of 216
+sfcWind_19880901.nc : ok
+sfcWind_19881001.nc : ok
+sfcWind_19881101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19881101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19881101.nc : time dimension has 214 values instead of 216
+sfcWind_19881201.nc : ok
+sfcWind_19890101.nc : ok
+sfcWind_19890201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19890201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19890201.nc : there is no dimension called ensemble inside this file
+sfcWind_19890201.nc : time dimension has 10812 values instead of 216
+sfcWind_19890301.nc : ok
+sfcWind_19890401.nc : ok
+sfcWind_19890501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19890501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19890501.nc : time dimension has 214 values instead of 216
+sfcWind_19890601.nc : ok
+sfcWind_19890701.nc : ok
+sfcWind_19890801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19890801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19890801.nc : time dimension has 720 values instead of 216
+sfcWind_19890901.nc : ok
+sfcWind_19891001.nc : ok
+sfcWind_19891101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19891101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19891101.nc : time dimension has 214 values instead of 216
+sfcWind_19891201.nc : ok
+sfcWind_19900101.nc : ok
+sfcWind_19900201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19900201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19900201.nc : time dimension has 848 values instead of 216
+sfcWind_19900301.nc : ok
+sfcWind_19900401.nc : >>>>> Missing file <<<<<
+sfcWind_19900501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19900501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19900501.nc : time dimension has 214 values instead of 216
+sfcWind_19900601.nc : ok
+sfcWind_19900701.nc : >>>>> Missing file <<<<<
+sfcWind_19900801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19900801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19900801.nc : time dimension has 212 values instead of 216
+sfcWind_19900901.nc : time dimension has 2 values instead of 216
+sfcWind_19901001.nc : time dimension has 120 values instead of 216
+sfcWind_19901101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19901101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19901101.nc : time dimension has 214 values instead of 216
+sfcWind_19901201.nc : ok
+sfcWind_19910101.nc : ok
+sfcWind_19910201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19910201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19910201.nc : there is no dimension called ensemble inside this file
+sfcWind_19910201.nc : time dimension has 10812 values instead of 216
+sfcWind_19910301.nc : ok
+sfcWind_19910401.nc : ok
+sfcWind_19910501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19910501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19910501.nc : time dimension has 214 values instead of 216
+sfcWind_19910601.nc : ok
+sfcWind_19910701.nc : ok
+sfcWind_19910801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19910801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19910801.nc : time dimension has 213 values instead of 216
+sfcWind_19910901.nc : ok
+sfcWind_19911001.nc : time dimension has 117 values instead of 216
+sfcWind_19911101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19911101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19911101.nc : time dimension has 214 values instead of 216
+sfcWind_19911201.nc : ok
+sfcWind_19920101.nc : ok
+sfcWind_19920201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19920201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19920201.nc : time dimension has 852 values instead of 216
+sfcWind_19920301.nc : ok
+sfcWind_19920401.nc : ok
+sfcWind_19920501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19920501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19920501.nc : time dimension has 214 values instead of 216
+sfcWind_19920601.nc : time dimension has 188 values instead of 216
+sfcWind_19920701.nc : ok
+sfcWind_19920801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19920801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19920801.nc : time dimension has 212 values instead of 216
+sfcWind_19920901.nc : ok
+sfcWind_19921001.nc : ok
+sfcWind_19921101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19921101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19921101.nc : time dimension has 214 values instead of 216
+sfcWind_19921201.nc : ok
+sfcWind_19930101.nc : ok
+sfcWind_19930201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19930201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19930201.nc : there is no dimension called ensemble inside this file
+sfcWind_19930201.nc : time dimension has 10812 values instead of 216
+sfcWind_19930301.nc : ok
+sfcWind_19930401.nc : ok
+sfcWind_19930501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19930501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19930501.nc : time dimension has 214 values instead of 216
+sfcWind_19930601.nc : there is no dimension called latitude inside this file
+sfcWind_19930601.nc : there is no dimension called longitude inside this file
+sfcWind_19930601.nc : there is no dimension called ensemble inside this file
+sfcWind_19930601.nc : there is no dimension called time inside this file
+sfcWind_19930701.nc : there is no dimension called ensemble inside this file
+sfcWind_19930801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19930801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19930801.nc : time dimension has 212 values instead of 216
+sfcWind_19930901.nc : there is no dimension called ensemble inside this file
+sfcWind_19931001.nc : there is no dimension called ensemble inside this file
+sfcWind_19931101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19931101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19931101.nc : time dimension has 214 values instead of 216
+sfcWind_19931201.nc : time dimension has 29 values instead of 216
+sfcWind_19940101.nc : ok
+sfcWind_19940201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19940201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19940201.nc : time dimension has 848 values instead of 216
+sfcWind_19940301.nc : time dimension has 101 values instead of 216
+sfcWind_19940401.nc : ok
+sfcWind_19940501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19940501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19940501.nc : time dimension has 214 values instead of 216
+sfcWind_19940601.nc : ok
+sfcWind_19940701.nc : ok
+sfcWind_19940801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19940801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19940801.nc : time dimension has 212 values instead of 216
+sfcWind_19940901.nc : ok
+sfcWind_19941001.nc : ok
+sfcWind_19941101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19941101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19941101.nc : time dimension has 214 values instead of 216
+sfcWind_19941201.nc : ok
+sfcWind_19950101.nc : time dimension has 75 values instead of 216
+sfcWind_19950201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19950201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19950201.nc : there is no dimension called ensemble inside this file
+sfcWind_19950201.nc : time dimension has 10812 values instead of 216
+sfcWind_19950301.nc : ok
+sfcWind_19950401.nc : time dimension has 187 values instead of 216
+sfcWind_19950501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19950501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19950501.nc : time dimension has 214 values instead of 216
+sfcWind_19950601.nc : ok
+sfcWind_19950701.nc : ok
+sfcWind_19950801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19950801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19950801.nc : time dimension has 213 values instead of 216
+sfcWind_19950901.nc : ok
+sfcWind_19951001.nc : ok
+sfcWind_19951101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19951101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19951101.nc : time dimension has 214 values instead of 216
+sfcWind_19951201.nc : ok
+sfcWind_19960101.nc : ok
+sfcWind_19960201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19960201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19960201.nc : time dimension has 852 values instead of 216
+sfcWind_19960301.nc : ok
+sfcWind_19960401.nc : ok
+sfcWind_19960501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19960501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19960501.nc : time dimension has 214 values instead of 216
+sfcWind_19960601.nc : ok
+sfcWind_19960701.nc : ok
+sfcWind_19960801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19960801.nc : longitude dimension has 512 values instead of 360
+sfcWind_19960801.nc : time dimension has 212 values instead of 216
+sfcWind_19960901.nc : ok
+sfcWind_19961001.nc : ok
+sfcWind_19961101.nc : latitude dimension has 256 values instead of 181
+sfcWind_19961101.nc : longitude dimension has 512 values instead of 360
+sfcWind_19961101.nc : time dimension has 214 values instead of 216
+sfcWind_19961201.nc : ok
+sfcWind_19970101.nc : ok
+sfcWind_19970201.nc : latitude dimension has 256 values instead of 181
+sfcWind_19970201.nc : longitude dimension has 512 values instead of 360
+sfcWind_19970201.nc : there is no dimension called ensemble inside this file
+sfcWind_19970201.nc : time dimension has 10812 values instead of 216
+sfcWind_19970301.nc : ok
+sfcWind_19970401.nc : ok
+sfcWind_19970501.nc : latitude dimension has 256 values instead of 181
+sfcWind_19970501.nc : longitude dimension has 512 values instead of 360
+sfcWind_19970501.nc : time dimension has 214 values instead of 216
+sfcWind_19970601.nc : ok
+sfcWind_19970701.nc : ok
+sfcWind_19970801.nc : latitude dimension has 256 values instead of 181
+sfcWind_19970801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19970801.nc : time dimension has 407 values instead of 216 +sfcWind_19970901.nc : ok +sfcWind_19971001.nc : ok +sfcWind_19971101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19971101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19971101.nc : time dimension has 214 values instead of 216 +sfcWind_19971201.nc : ok +sfcWind_19980101.nc : ok +sfcWind_19980201.nc : latitude dimension has 256 values instead of 181 +sfcWind_19980201.nc : longitude dimension has 512 values instead of 360 +sfcWind_19980201.nc : time dimension has 848 values instead of 216 +sfcWind_19980301.nc : ok +sfcWind_19980401.nc : ok +sfcWind_19980501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19980501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19980501.nc : time dimension has 214 values instead of 216 +sfcWind_19980601.nc : ok +sfcWind_19980701.nc : ok +sfcWind_19980801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19980801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19980801.nc : time dimension has 212 values instead of 216 +sfcWind_19980901.nc : ok +sfcWind_19981001.nc : ok +sfcWind_19981101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19981101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19981101.nc : time dimension has 214 values instead of 216 +sfcWind_19981201.nc : ok +sfcWind_19990101.nc : ok +sfcWind_19990201.nc : latitude dimension has 256 values instead of 181 +sfcWind_19990201.nc : longitude dimension has 512 values instead of 360 +sfcWind_19990201.nc : there is no dimension called ensemble inside this file +sfcWind_19990201.nc : time dimension has 10812 values instead of 216 +sfcWind_19990301.nc : ok +sfcWind_19990401.nc : ok +sfcWind_19990501.nc : latitude dimension has 256 values instead of 181 +sfcWind_19990501.nc : longitude dimension has 512 values instead of 360 +sfcWind_19990501.nc : time dimension has 214 values instead of 216 +sfcWind_19990601.nc : ok +sfcWind_19990701.nc : ok +sfcWind_19990801.nc : latitude dimension has 256 values instead of 181 +sfcWind_19990801.nc : longitude dimension has 512 values instead of 360 +sfcWind_19990801.nc : time dimension has 213 values instead of 216 +sfcWind_19990901.nc : ok +sfcWind_19991001.nc : ok +sfcWind_19991101.nc : latitude dimension has 256 values instead of 181 +sfcWind_19991101.nc : longitude dimension has 512 values instead of 360 +sfcWind_19991101.nc : time dimension has 243 values instead of 216 +sfcWind_19991201.nc : ok +sfcWind_20000101.nc : ok +sfcWind_20000201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20000201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20000201.nc : there is no dimension called ensemble inside this file +sfcWind_20000201.nc : time dimension has 852 values instead of 216 +sfcWind_20000301.nc : time dimension has 55 values instead of 216 +sfcWind_20000401.nc : time dimension has 54 values instead of 216 +sfcWind_20000501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20000501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20000501.nc : time dimension has 214 values instead of 216 +sfcWind_20000601.nc : ok +sfcWind_20000701.nc : ok +sfcWind_20000801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20000801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20000801.nc : time dimension has 212 values instead 
of 216 +sfcWind_20000901.nc : ok +sfcWind_20001001.nc : ok +sfcWind_20001101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20001101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20001101.nc : time dimension has 214 values instead of 216 +sfcWind_20001201.nc : ok +sfcWind_20010101.nc : ok +sfcWind_20010201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20010201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20010201.nc : there is no dimension called ensemble inside this file +sfcWind_20010201.nc : time dimension has 10812 values instead of 216 +sfcWind_20010301.nc : ok +sfcWind_20010401.nc : ok +sfcWind_20010501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20010501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20010501.nc : time dimension has 214 values instead of 216 +sfcWind_20010601.nc : ok +sfcWind_20010701.nc : ok +sfcWind_20010801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20010801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20010801.nc : time dimension has 212 values instead of 216 +sfcWind_20010901.nc : ok +sfcWind_20011001.nc : ok +sfcWind_20011101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20011101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20011101.nc : time dimension has 214 values instead of 216 +sfcWind_20011201.nc : ok +sfcWind_20020101.nc : ok +sfcWind_20020201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20020201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20020201.nc : time dimension has 848 values instead of 216 +sfcWind_20020301.nc : ok +sfcWind_20020401.nc : ok +sfcWind_20020501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20020501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20020501.nc : time dimension has 214 values instead of 216 +sfcWind_20020601.nc : ok +sfcWind_20020701.nc : ok +sfcWind_20020801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20020801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20020801.nc : time dimension has 212 values instead of 216 +sfcWind_20020901.nc : ok +sfcWind_20021001.nc : ok +sfcWind_20021101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20021101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20021101.nc : time dimension has 214 values instead of 216 +sfcWind_20021201.nc : ok +sfcWind_20030101.nc : ok +sfcWind_20030201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20030201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20030201.nc : there is no dimension called ensemble inside this file +sfcWind_20030201.nc : time dimension has 10812 values instead of 216 +sfcWind_20030301.nc : ok +sfcWind_20030401.nc : ok +sfcWind_20030501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20030501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20030501.nc : time dimension has 214 values instead of 216 +sfcWind_20030601.nc : ok +sfcWind_20030701.nc : ok +sfcWind_20030801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20030801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20030801.nc : time dimension has 213 values instead of 216 +sfcWind_20030901.nc : ok +sfcWind_20031001.nc : ok +sfcWind_20031101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20031101.nc : longitude dimension has 512 values instead of 360 
+sfcWind_20031101.nc : time dimension has 214 values instead of 216 +sfcWind_20031201.nc : ok +sfcWind_20040101.nc : ok +sfcWind_20040201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20040201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20040201.nc : time dimension has 852 values instead of 216 +sfcWind_20040301.nc : ok +sfcWind_20040401.nc : ok +sfcWind_20040501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20040501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20040501.nc : time dimension has 214 values instead of 216 +sfcWind_20040601.nc : ok +sfcWind_20040701.nc : ok +sfcWind_20040801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20040801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20040801.nc : time dimension has 212 values instead of 216 +sfcWind_20040901.nc : ok +sfcWind_20041001.nc : ok +sfcWind_20041101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20041101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20041101.nc : time dimension has 214 values instead of 216 +sfcWind_20041201.nc : ok +sfcWind_20050101.nc : ok +sfcWind_20050201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20050201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20050201.nc : there is no dimension called ensemble inside this file +sfcWind_20050201.nc : time dimension has 10812 values instead of 216 +sfcWind_20050301.nc : ok +sfcWind_20050401.nc : ok +sfcWind_20050501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20050501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20050501.nc : time dimension has 214 values instead of 216 +sfcWind_20050601.nc : ok +sfcWind_20050701.nc : ok +sfcWind_20050801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20050801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20050801.nc : time dimension has 212 values instead of 216 +sfcWind_20050901.nc : ok +sfcWind_20051001.nc : ok +sfcWind_20051101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20051101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20051101.nc : time dimension has 214 values instead of 216 +sfcWind_20051201.nc : ok +sfcWind_20060101.nc : ok +sfcWind_20060201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20060201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20060201.nc : time dimension has 848 values instead of 216 +sfcWind_20060301.nc : ok +sfcWind_20060401.nc : ok +sfcWind_20060501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20060501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20060501.nc : time dimension has 214 values instead of 216 +sfcWind_20060601.nc : ok +sfcWind_20060701.nc : ok +sfcWind_20060801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20060801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20060801.nc : time dimension has 212 values instead of 216 +sfcWind_20060901.nc : ok +sfcWind_20061001.nc : ok +sfcWind_20061101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20061101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20061101.nc : time dimension has 214 values instead of 216 +sfcWind_20061201.nc : ok +sfcWind_20070101.nc : ok +sfcWind_20070201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20070201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20070201.nc : there 
is no dimension called ensemble inside this file +sfcWind_20070201.nc : time dimension has 10812 values instead of 216 +sfcWind_20070301.nc : ok +sfcWind_20070401.nc : ok +sfcWind_20070501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20070501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20070501.nc : time dimension has 214 values instead of 216 +sfcWind_20070601.nc : ok +sfcWind_20070701.nc : ok +sfcWind_20070801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20070801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20070801.nc : time dimension has 213 values instead of 216 +sfcWind_20070901.nc : ok +sfcWind_20071001.nc : ok +sfcWind_20071101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20071101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20071101.nc : time dimension has 214 values instead of 216 +sfcWind_20071201.nc : ok +sfcWind_20080101.nc : ok +sfcWind_20080201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20080201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20080201.nc : time dimension has 852 values instead of 216 +sfcWind_20080301.nc : time dimension has 2 values instead of 216 +sfcWind_20080401.nc : ok +sfcWind_20080501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20080501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20080501.nc : time dimension has 214 values instead of 216 +sfcWind_20080601.nc : ok +sfcWind_20080701.nc : ok +sfcWind_20080801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20080801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20080801.nc : time dimension has 212 values instead of 216 +sfcWind_20080901.nc : ok +sfcWind_20081001.nc : ok +sfcWind_20081101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20081101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20081101.nc : time dimension has 214 values instead of 216 +sfcWind_20081201.nc : ok +sfcWind_20090101.nc : ok +sfcWind_20090201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20090201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20090201.nc : there is no dimension called ensemble inside this file +sfcWind_20090201.nc : time dimension has 10812 values instead of 216 +sfcWind_20090301.nc : ok +sfcWind_20090401.nc : ok +sfcWind_20090501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20090501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20090501.nc : time dimension has 214 values instead of 216 +sfcWind_20090601.nc : ok +sfcWind_20090701.nc : ok +sfcWind_20090801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20090801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20090801.nc : time dimension has 212 values instead of 216 +sfcWind_20090901.nc : ok +sfcWind_20091001.nc : ok +sfcWind_20091101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20091101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20091101.nc : time dimension has 214 values instead of 216 +sfcWind_20091201.nc : ok +sfcWind_20100101.nc : ok +sfcWind_20100201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20100201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20100201.nc : time dimension has 848 values instead of 216 +sfcWind_20100301.nc : ok +sfcWind_20100401.nc : ok +sfcWind_20100501.nc : latitude dimension has 256 values instead of 181 
+sfcWind_20100501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20100501.nc : time dimension has 214 values instead of 216 +sfcWind_20100601.nc : ok +sfcWind_20100701.nc : ok +sfcWind_20100801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20100801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20100801.nc : time dimension has 212 values instead of 216 +sfcWind_20100901.nc : time dimension has 110 values instead of 216 +sfcWind_20101001.nc : ok +sfcWind_20101101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20101101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20101101.nc : time dimension has 214 values instead of 216 +sfcWind_20101201.nc : time dimension has 160 values instead of 216 +sfcWind_20110101.nc : ok +sfcWind_20110201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20110201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20110201.nc : there is no dimension called ensemble inside this file +sfcWind_20110201.nc : time dimension has 10812 values instead of 216 +sfcWind_20110301.nc : ok +sfcWind_20110401.nc : ok +sfcWind_20110501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20110501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20110501.nc : time dimension has 214 values instead of 216 +sfcWind_20110601.nc : ok +sfcWind_20110701.nc : ok +sfcWind_20110801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20110801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20110801.nc : time dimension has 213 values instead of 216 +sfcWind_20110901.nc : ok +sfcWind_20111001.nc : ok +sfcWind_20111101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20111101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20111101.nc : time dimension has 214 values instead of 216 +sfcWind_20111201.nc : ok +sfcWind_20120101.nc : ok +sfcWind_20120201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20120201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20120201.nc : time dimension has 852 values instead of 216 +sfcWind_20120301.nc : ok +sfcWind_20120401.nc : time dimension has 180 values instead of 216 +sfcWind_20120501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20120501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20120501.nc : time dimension has 214 values instead of 216 +sfcWind_20120601.nc : time dimension has 97 values instead of 216 +sfcWind_20120701.nc : ok +sfcWind_20120801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20120801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20120801.nc : time dimension has 212 values instead of 216 +sfcWind_20120901.nc : ok +sfcWind_20121001.nc : >>>>> Missing file <<<<< +sfcWind_20121101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20121101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20121101.nc : time dimension has 214 values instead of 216 +sfcWind_20121201.nc : >>>>> Missing file <<<<< +sfcWind_20130101.nc : >>>>> Missing file <<<<< +sfcWind_20130201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20130201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20130201.nc : there is no dimension called ensemble inside this file +sfcWind_20130201.nc : time dimension has 11016 values instead of 216 +sfcWind_20130301.nc : >>>>> Missing file <<<<< +sfcWind_20130401.nc : >>>>> Missing file <<<<< 
+sfcWind_20130501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20130501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20130501.nc : there is no dimension called ensemble inside this file +sfcWind_20130501.nc : time dimension has 11016 values instead of 216 +sfcWind_20130601.nc : >>>>> Missing file <<<<< +sfcWind_20130701.nc : >>>>> Missing file <<<<< +sfcWind_20130801.nc : latitude dimension has 256 values instead of 181 +sfcWind_20130801.nc : longitude dimension has 512 values instead of 360 +sfcWind_20130801.nc : time dimension has 212 values instead of 216 +sfcWind_20130901.nc : ok +sfcWind_20131001.nc : ok +sfcWind_20131101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20131101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20131101.nc : time dimension has 212 values instead of 216 +sfcWind_20131201.nc : ok +sfcWind_20140101.nc : >>>>> Missing file <<<<< +sfcWind_20140201.nc : latitude dimension has 256 values instead of 181 +sfcWind_20140201.nc : longitude dimension has 512 values instead of 360 +sfcWind_20140201.nc : time dimension has 861 values instead of 216 +sfcWind_20140301.nc : >>>>> Missing file <<<<< +sfcWind_20140401.nc : >>>>> Missing file <<<<< +sfcWind_20140501.nc : latitude dimension has 256 values instead of 181 +sfcWind_20140501.nc : longitude dimension has 512 values instead of 360 +sfcWind_20140501.nc : time dimension has 214 values instead of 216 +sfcWind_20140601.nc : >>>>> Missing file <<<<< +sfcWind_20140701.nc : >>>>> Missing file <<<<< +sfcWind_20140801.nc : ok +sfcWind_20140901.nc : ok +sfcWind_20141001.nc : ok +sfcWind_20141101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20141101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20141101.nc : time dimension has 212 values instead of 216 +sfcWind_20141201.nc : ok +sfcWind_20150101.nc : >>>>> Missing file <<<<< +sfcWind_20150201.nc : >>>>> Missing file <<<<< +sfcWind_20150301.nc : >>>>> Missing file <<<<< +sfcWind_20150401.nc : >>>>> Missing file <<<<< +sfcWind_20150501.nc : >>>>> Missing file <<<<< +sfcWind_20150601.nc : ok +sfcWind_20150701.nc : ok +sfcWind_20150801.nc : time dimension has 197 values instead of 216 +sfcWind_20150901.nc : >>>>> Missing file <<<<< +sfcWind_20151001.nc : >>>>> Missing file <<<<< +sfcWind_20151101.nc : latitude dimension has 256 values instead of 181 +sfcWind_20151101.nc : longitude dimension has 512 values instead of 360 +sfcWind_20151101.nc : time dimension has 236 values instead of 216 +sfcWind_20151201.nc : >>>>> Missing file <<<<< diff --git a/old/backup/bash/check_ECMWFS4_tas_daily.txt b/old/backup/bash/check_ECMWFS4_tas_daily.txt new file mode 100644 index 0000000000000000000000000000000000000000..24d458a576f63e7080a85c7a9f21fe671ad11093 --- /dev/null +++ b/old/backup/bash/check_ECMWFS4_tas_daily.txt @@ -0,0 +1,437 @@ +Checked variable: tas +Path: /esnas/exp/ecmwf/system4_m1/daily_mean/tas_f6h/ +Extension: .nc +Checked period: 1981-2015 +>>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<< +Latitude denomination: latitude +Number of latitude values: 181 +Longitude denomination: longitude +Number of longitude values: 360 +Ensemble denomination: ensemble +Number of ensemble values: 15 +Alternative number of ensemble values: 51 +Lead-time denonomination: time +Number of lead-times: 216 + >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< +tas_19810101.nc : time dimension has 153 values instead of 216 +tas_19810201.nc : ok +tas_19810301.nc : ok 
+tas_19810401.nc : ok +tas_19810501.nc : ok +tas_19810601.nc : ok +tas_19810701.nc : time dimension has 422 values instead of 216 +tas_19810801.nc : ok +tas_19810901.nc : ok +tas_19811001.nc : ok +tas_19811101.nc : ok +tas_19811201.nc : ok +tas_19820101.nc : ok +tas_19820201.nc : ok +tas_19820301.nc : ok +tas_19820401.nc : ok +tas_19820501.nc : ok +tas_19820601.nc : ok +tas_19820701.nc : time dimension has 537 values instead of 216 +tas_19820801.nc : ok +tas_19820901.nc : ok +tas_19821001.nc : ok +tas_19821101.nc : ok +tas_19821201.nc : ok +tas_19830101.nc : ok +tas_19830201.nc : ok +tas_19830301.nc : ok +tas_19830401.nc : time dimension has 10 values instead of 216 +tas_19830501.nc : ok +tas_19830601.nc : ok +tas_19830701.nc : ok +tas_19830801.nc : ok +tas_19830901.nc : time dimension has 316 values instead of 216 +tas_19831001.nc : ok +tas_19831101.nc : ok +tas_19831201.nc : ok +tas_19840101.nc : ok +tas_19840201.nc : ok +tas_19840301.nc : ok +tas_19840401.nc : ok +tas_19840501.nc : ok +tas_19840601.nc : latitude dimension has 256 values instead of 181 +tas_19840601.nc : longitude dimension has 512 values instead of 360 +tas_19840601.nc : time dimension has 861 values instead of 216 +tas_19840701.nc : ok +tas_19840801.nc : ok +tas_19840901.nc : time dimension has 824 values instead of 216 +tas_19841001.nc : ok +tas_19841101.nc : ok +tas_19841201.nc : ok +tas_19850101.nc : ok +tas_19850201.nc : ok +tas_19850301.nc : time dimension has 309 values instead of 216 +tas_19850401.nc : ok +tas_19850501.nc : ok +tas_19850601.nc : time dimension has 321 values instead of 216 +tas_19850701.nc : ok +tas_19850801.nc : ok +tas_19850901.nc : time dimension has 658 values instead of 216 +tas_19851001.nc : ok +tas_19851101.nc : ok +tas_19851201.nc : ok +tas_19860101.nc : ok +tas_19860201.nc : ok +tas_19860301.nc : time dimension has 815 values instead of 216 +tas_19860401.nc : ok +tas_19860501.nc : ok +tas_19860601.nc : ok +tas_19860701.nc : ok +tas_19860801.nc : ok +tas_19860901.nc : time dimension has 424 values instead of 216 +tas_19861001.nc : ok +tas_19861101.nc : ok +tas_19861201.nc : ok +tas_19870101.nc : ok +tas_19870201.nc : ok +tas_19870301.nc : time dimension has 817 values instead of 216 +tas_19870401.nc : ok +tas_19870501.nc : ok +tas_19870601.nc : ok +tas_19870701.nc : ok +tas_19870801.nc : ok +tas_19870901.nc : time dimension has 347 values instead of 216 +tas_19871001.nc : ok +tas_19871101.nc : ok +tas_19871201.nc : ok +tas_19880101.nc : ok +tas_19880201.nc : ok +tas_19880301.nc : time dimension has 362 values instead of 216 +tas_19880401.nc : ok +tas_19880501.nc : ok +tas_19880601.nc : ok +tas_19880701.nc : time dimension has 841 values instead of 216 +tas_19880801.nc : ok +tas_19880901.nc : time dimension has 482 values instead of 216 +tas_19881001.nc : ok +tas_19881101.nc : ok +tas_19881201.nc : ok +tas_19890101.nc : ok +tas_19890201.nc : ok +tas_19890301.nc : time dimension has 361 values instead of 216 +tas_19890401.nc : ok +tas_19890501.nc : ok +tas_19890601.nc : ok +tas_19890701.nc : time dimension has 512 values instead of 216 +tas_19890801.nc : ok +tas_19890901.nc : ok +tas_19891001.nc : ok +tas_19891101.nc : ok +tas_19891201.nc : ok +tas_19900101.nc : time dimension has 38 values instead of 216 +tas_19900201.nc : ok +tas_19900301.nc : time dimension has 260 values instead of 216 +tas_19900401.nc : ok +tas_19900501.nc : ok +tas_19900601.nc : ok +tas_19900701.nc : ok +tas_19900801.nc : ok +tas_19900901.nc : ok +tas_19901001.nc : ok +tas_19901101.nc : ok +tas_19901201.nc : ok 
+tas_19910101.nc : ok +tas_19910201.nc : ok +tas_19910301.nc : ok +tas_19910401.nc : ok +tas_19910501.nc : ok +tas_19910601.nc : ok +tas_19910701.nc : ok +tas_19910801.nc : ok +tas_19910901.nc : ok +tas_19911001.nc : ok +tas_19911101.nc : ok +tas_19911201.nc : ok +tas_19920101.nc : ok +tas_19920201.nc : ok +tas_19920301.nc : ok +tas_19920401.nc : ok +tas_19920501.nc : ok +tas_19920601.nc : ok +tas_19920701.nc : ok +tas_19920801.nc : ok +tas_19920901.nc : ok +tas_19921001.nc : ok +tas_19921101.nc : time dimension has 192 values instead of 216 +tas_19921201.nc : ok +tas_19930101.nc : ok +tas_19930201.nc : ok +tas_19930301.nc : ok +tas_19930401.nc : ok +tas_19930501.nc : ok +tas_19930601.nc : ok +tas_19930701.nc : ok +tas_19930801.nc : ok +tas_19930901.nc : ok +tas_19931001.nc : ok +tas_19931101.nc : ok +tas_19931201.nc : ok +tas_19940101.nc : ok +tas_19940201.nc : ok +tas_19940301.nc : ok +tas_19940401.nc : ok +tas_19940501.nc : ok +tas_19940601.nc : ok +tas_19940701.nc : ok +tas_19940801.nc : ok +tas_19940901.nc : ok +tas_19941001.nc : ok +tas_19941101.nc : ok +tas_19941201.nc : ok +tas_19950101.nc : ok +tas_19950201.nc : ok +tas_19950301.nc : ok +tas_19950401.nc : ok +tas_19950501.nc : ok +tas_19950601.nc : ok +tas_19950701.nc : ok +tas_19950801.nc : ok +tas_19950901.nc : ok +tas_19951001.nc : ok +tas_19951101.nc : ok +tas_19951201.nc : ok +tas_19960101.nc : ok +tas_19960201.nc : ok +tas_19960301.nc : ok +tas_19960401.nc : ok +tas_19960501.nc : ok +tas_19960601.nc : ok +tas_19960701.nc : ok +tas_19960801.nc : ok +tas_19960901.nc : ok +tas_19961001.nc : ok +tas_19961101.nc : ok +tas_19961201.nc : ok +tas_19970101.nc : ok +tas_19970201.nc : ok +tas_19970301.nc : ok +tas_19970401.nc : ok +tas_19970501.nc : ok +tas_19970601.nc : ok +tas_19970701.nc : ok +tas_19970801.nc : ok +tas_19970901.nc : ok +tas_19971001.nc : ok +tas_19971101.nc : ok +tas_19971201.nc : ok +tas_19980101.nc : ok +tas_19980201.nc : ok +tas_19980301.nc : time dimension has 9 values instead of 216 +tas_19980401.nc : ok +tas_19980501.nc : ok +tas_19980601.nc : ok +tas_19980701.nc : ok +tas_19980801.nc : ok +tas_19980901.nc : ok +tas_19981001.nc : ok +tas_19981101.nc : ok +tas_19981201.nc : ok +tas_19990101.nc : time dimension has 22 values instead of 216 +tas_19990201.nc : ok +tas_19990301.nc : time dimension has 16 values instead of 216 +tas_19990401.nc : ok +tas_19990501.nc : ok +tas_19990601.nc : ok +tas_19990701.nc : ok +tas_19990801.nc : ok +tas_19990901.nc : ok +tas_19991001.nc : ok +tas_19991101.nc : ok +tas_19991201.nc : ok +tas_20000101.nc : ok +tas_20000201.nc : ok +tas_20000301.nc : ok +tas_20000401.nc : ok +tas_20000501.nc : ok +tas_20000601.nc : ok +tas_20000701.nc : ok +tas_20000801.nc : ok +tas_20000901.nc : ok +tas_20001001.nc : ok +tas_20001101.nc : ok +tas_20001201.nc : ok +tas_20010101.nc : ok +tas_20010201.nc : ok +tas_20010301.nc : ok +tas_20010401.nc : ok +tas_20010501.nc : ok +tas_20010601.nc : ok +tas_20010701.nc : ok +tas_20010801.nc : ok +tas_20010901.nc : ok +tas_20011001.nc : ok +tas_20011101.nc : ok +tas_20011201.nc : ok +tas_20020101.nc : ok +tas_20020201.nc : time dimension has 207 values instead of 216 +tas_20020301.nc : ok +tas_20020401.nc : ok +tas_20020501.nc : ok +tas_20020601.nc : ok +tas_20020701.nc : ok +tas_20020801.nc : ok +tas_20020901.nc : ok +tas_20021001.nc : ok +tas_20021101.nc : ok +tas_20021201.nc : ok +tas_20030101.nc : ok +tas_20030201.nc : ok +tas_20030301.nc : ok +tas_20030401.nc : ok +tas_20030501.nc : ok +tas_20030601.nc : ok +tas_20030701.nc : ok +tas_20030801.nc : ok 
+tas_20030901.nc : ok +tas_20031001.nc : ok +tas_20031101.nc : ok +tas_20031201.nc : ok +tas_20040101.nc : ok +tas_20040201.nc : ok +tas_20040301.nc : ok +tas_20040401.nc : ok +tas_20040501.nc : ok +tas_20040601.nc : ok +tas_20040701.nc : ok +tas_20040801.nc : ok +tas_20040901.nc : ok +tas_20041001.nc : ok +tas_20041101.nc : ok +tas_20041201.nc : ok +tas_20050101.nc : ok +tas_20050201.nc : ok +tas_20050301.nc : ok +tas_20050401.nc : ok +tas_20050501.nc : ok +tas_20050601.nc : ok +tas_20050701.nc : ok +tas_20050801.nc : ok +tas_20050901.nc : ok +tas_20051001.nc : ok +tas_20051101.nc : ok +tas_20051201.nc : ok +tas_20060101.nc : ok +tas_20060201.nc : ok +tas_20060301.nc : ok +tas_20060401.nc : ok +tas_20060501.nc : time dimension has 186 values instead of 216 +tas_20060601.nc : ok +tas_20060701.nc : time dimension has 9 values instead of 216 +tas_20060801.nc : ok +tas_20060901.nc : ok +tas_20061001.nc : ok +tas_20061101.nc : ok +tas_20061201.nc : ok +tas_20070101.nc : ok +tas_20070201.nc : ok +tas_20070301.nc : ok +tas_20070401.nc : ok +tas_20070501.nc : ok +tas_20070601.nc : ok +tas_20070701.nc : ok +tas_20070801.nc : ok +tas_20070901.nc : ok +tas_20071001.nc : ok +tas_20071101.nc : ok +tas_20071201.nc : time dimension has 110 values instead of 216 +tas_20080101.nc : ok +tas_20080201.nc : ok +tas_20080301.nc : ok +tas_20080401.nc : ok +tas_20080501.nc : ok +tas_20080601.nc : ok +tas_20080701.nc : ok +tas_20080801.nc : ok +tas_20080901.nc : ok +tas_20081001.nc : ok +tas_20081101.nc : ok +tas_20081201.nc : ok +tas_20090101.nc : ok +tas_20090201.nc : ok +tas_20090301.nc : ok +tas_20090401.nc : ok +tas_20090501.nc : ok +tas_20090601.nc : ok +tas_20090701.nc : ok +tas_20090801.nc : ok +tas_20090901.nc : ok +tas_20091001.nc : ok +tas_20091101.nc : ok +tas_20091201.nc : ok +tas_20100101.nc : ok +tas_20100201.nc : ok +tas_20100301.nc : ok +tas_20100401.nc : ok +tas_20100501.nc : ok +tas_20100601.nc : ok +tas_20100701.nc : ok +tas_20100801.nc : ok +tas_20100901.nc : ok +tas_20101001.nc : ok +tas_20101101.nc : ok +tas_20101201.nc : ok +tas_20110101.nc : ok +tas_20110201.nc : ok +tas_20110301.nc : ok +tas_20110401.nc : ok +tas_20110501.nc : ok +tas_20110601.nc : ok +tas_20110701.nc : time dimension has 112 values instead of 216 +tas_20110801.nc : ok +tas_20110901.nc : time dimension has 118 values instead of 216 +tas_20111001.nc : ok +tas_20111101.nc : ok +tas_20111201.nc : ok +tas_20120101.nc : ok +tas_20120201.nc : ok +tas_20120301.nc : ok +tas_20120401.nc : ok +tas_20120501.nc : ok +tas_20120601.nc : ok +tas_20120701.nc : ok +tas_20120801.nc : ok +tas_20120901.nc : ok +tas_20121001.nc : ok +tas_20121101.nc : ok +tas_20121201.nc : ok +tas_20130101.nc : ok +tas_20130201.nc : ok +tas_20130301.nc : ok +tas_20130401.nc : ok +tas_20130501.nc : ok +tas_20130601.nc : ok +tas_20130701.nc : time dimension has 89 values instead of 216 +tas_20130801.nc : ok +tas_20130901.nc : ok +tas_20131001.nc : ok +tas_20131101.nc : ok +tas_20131201.nc : ok +tas_20140101.nc : ok +tas_20140201.nc : ok +tas_20140301.nc : ok +tas_20140401.nc : ok +tas_20140501.nc : ok +tas_20140601.nc : ok +tas_20140701.nc : ok +tas_20140801.nc : ok +tas_20140901.nc : ok +tas_20141001.nc : ok +tas_20141101.nc : ok +tas_20141201.nc : ok +tas_20150101.nc : ok +tas_20150201.nc : ok +tas_20150301.nc : ok +tas_20150401.nc : ok +tas_20150501.nc : ok +tas_20150601.nc : ok +tas_20150701.nc : time dimension has 811 values instead of 216 +tas_20150801.nc : ok +tas_20150901.nc : >>>>> Missing file <<<<< +tas_20151001.nc : >>>>> Missing file <<<<< 
+tas_20151101.nc : >>>>> Missing file <<<<< +tas_20151201.nc : >>>>> Missing file <<<<< diff --git a/old/backup/bash/checking_if_data_inside_is_the_same.txt b/old/backup/bash/checking_if_data_inside_is_the_same.txt new file mode 100644 index 0000000000000000000000000000000000000000..3278e08c41a35c60de357c446d47b524df193800 --- /dev/null +++ b/old/backup/bash/checking_if_data_inside_is_the_same.txt @@ -0,0 +1,32 @@ + +# check reanalysis: +month=01 +day=02 + +for year in {2002..2013} +do +ncks -O -h -d latitude,64 -d longitude,632 -d time,56,83 /esnas/exp/ECMWF/monthly/ensforhc/6hourly/sfcWind/2014${month}${day}00/sfcWind_${year}${month}${day}00.nc ~/my_point_sfcWind.nc; +ncwa -O -a ensemble ~/my_point_sfcWind.nc ~/my_point_ensemble_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_all28_sfcWind_${year}010200.nc; +ncks -O -h -d time,0,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM0_sfcWind_${year}010200.nc; +ncks -O -h -d time,1,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM6_sfcWind_${year}010200.nc; +ncks -O -h -d time,2,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM12_sfcWind_${year}010200.nc; +ncks -O -h -d time,3,,4 ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_UTM_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_UTM_sfcWind.nc ~/my_point_ensemble_UTM18_sfcWind_${year}010200.nc; +done + + + + +# check forecasts: +month=01 +for day in 02 09 16 23 30 +do +ncks -O -h -d latitude,64 -d longitude,632 -d time,56,83 /esnas/exp/ECMWF/monthly/ensfor/6hourly/sfcWind/sfcWind_2014${month}${day}00.nc ~/my_point_sfcWind.nc; +ncwa -O -a ensemble ~/my_point_sfcWind.nc ~/my_point_ensemble_sfcWind.nc; +ncwa -O -a time ~/my_point_ensemble_sfcWind.nc ~/my_point_ensemble_all28_sfcWind_2014${month}${day}00.nc; +done + diff --git a/old/backup/bash/checking_ncdata.sh b/old/backup/bash/checking_ncdata.sh new file mode 100755 index 0000000000000000000000000000000000000000..b539211e756e0dbec2989ea6c20d39185feeb7e8 --- /dev/null +++ b/old/backup/bash/checking_ncdata.sh @@ -0,0 +1,161 @@ +#!/bin/bash + +# Creation: 6/2016 +# Authors: Nicola Cortesi and Raul Marcos +# Aim: to do a quality control of the dimension variables inside all NetCDF data files in a directory. Note that this script cannot detect whether a file is corrupt. +# I/O: you only have to specify where the files you want to check are located, and the correct names and sizes of the dimensions inside them. +# The output is a text file created in the directory where this script is run, whose name starts with "check_", containing the results of the quality control.
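+# Example (hypothetical run, assuming the default settings below are left unchanged): executing ./checking_ncdata.sh +# writes ./check_ECMWFS4_sfcWind_daily.txt, with one line per checked file and per detected problem, e.g.: +# sfcWind_19920101.nc : ok +# sfcWind_19920201.nc : time dimension has 852 values instead of 216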
+ +var=sfcWind #tas #sfcWind #psl # name of the variable to check +dat=ECMWFS4 # dataset name +freq=daily # time step (used only for the output filename) + +path=/esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/ +#path=/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind/ # its path +#path=/esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/ +#/esnas/exp/Meteofrance/seasonal/0001/s004/m001/6hourly/sfcWind Meteofrance S4 +#/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind ECMWF S4 + +suffix=01 # this suffix is usually present in all S4 netCDF files, right before the file extension +ext=.nc # file extension + +yearStart=1981 # time period to check inside the files +yearEnd=2015 + +nameLatitude=latitude # name of the latitude dimension to check in the file +nLatitude=181 #128 #181 #256 # number of latitude values to check + +nameLongitude=longitude # name of the longitude dimension to check in the file +nLongitude=360 #256 #360 #512 # number of longitude values to check + +nameMembers=ensemble # name of the dimension with the model members to check in the file +nMembers=15 #51 #15 # number of members to check in the file +nMemberss=51 # alternative number of ensemble members that is also accepted (set it equal to nMembers if there is none) + +nameLeadtimes=time # name of the dimension with the forecast time to check in the file +nLeadtimes=216 #861 # number of forecast times to check in the file (215 complete days * 4 six-hourly values + 1 for the last midnight) + +printf "\t\n" > ktemp +printf "Checked variable: \t $var\n" >> ktemp +printf "Path: \t $path\n" >> ktemp +printf "Extension: \t $ext\n" >> ktemp +printf "Checked period: \t $yearStart-$yearEnd\n" >> ktemp +printf "\t\n" >> ktemp +printf ">>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<<\n" >> ktemp +printf "\t\n" >> ktemp +printf "Latitude denomination: \t $nameLatitude\n" >> ktemp +printf "Number of latitude values: \t $nLatitude\n" >> ktemp +printf "Longitude denomination: \t $nameLongitude\n" >> ktemp +printf "Number of longitude values:\t $nLongitude\n" >> ktemp +printf "Ensemble denomination: \t $nameMembers\n" >> ktemp +printf "Number of ensemble values: \t $nMembers\n" >> ktemp +if [ $nMembers != $nMemberss ]; then +printf "Alternative number of ensemble values: \t $nMemberss\n" >> ktemp +fi +printf "Lead-time denomination: \t $nameLeadtimes\n" >> ktemp +printf "Number of lead-times: \t $nLeadtimes\n" >> ktemp + +printf "\n\n >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< \n\n" | tee -a ktemp + +outputfile=./check_${dat}_${var}_${freq}.txt + +# function that detects the size of a dimension variable in a netCDF file: +# +# usage: get_size myNetCDF.nc myDimensionName +# +function get_size { + ncks -m ${1} | grep -E -i ": ${2}, size =" | cut -d ' ' -f 7 | uniq +} + +# check file existence and dimension sizes: +for ((i=$yearStart;i<=$yearEnd;i++)) +do +for j in 01 02 03 04 05 06 07 08 09 10 11 12 +do + ok1=0; ok2=0; ok3=0; ok4=0 ; ok5=0 + + # echo $path${var}_$i$j$suffix$ext > ktemp + if [ !
-f $path${var}_$i$j$suffix$ext ] + then + echo ${var}_$i$j$suffix$ext ": >>>>> Missing file <<<<<" | tee -a ktemp + else + nMemb=`get_size $path${var}_$i$j$suffix$ext $nameMembers` + nLead=`get_size $path${var}_$i$j$suffix$ext $nameLeadtimes` + nLat=`get_size $path${var}_$i$j$suffix$ext $nameLatitude` + nLon=`get_size $path${var}_$i$j$suffix$ext $nameLongitude` + + if [ -z $nLat ]; then # check if nLat is empty + echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLatitude " inside this file " | tee -a ktemp + elif [ ${nLat//[[:blank:]]/} -ne $nLatitude ]; then + echo ${var}_$i$j$suffix$ext ": "$nameLatitude" dimension has " $nLat " values instead of " $nLatitude | tee -a ktemp + else + ok1=1 + fi + + if [ -z $nLon ]; then # check if nLon is empty + echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLongitude " inside this file " | tee -a ktemp + elif [ ${nLon//[[:blank:]]/} -ne $nLongitude ]; then + echo ${var}_$i$j$suffix$ext ": "$nameLongitude "dimension has " $nLon " values instead of " $nLongitude | tee -a ktemp + else + ok2=1 + fi + + if [ -z $nMemb ]; then # check if nMemb is empty + echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameMembers " inside this file " | tee -a ktemp + elif [ ${nMemb//[[:blank:]]/} -ne $nMembers -a ${nMemb//[[:blank:]]/} -ne "$nMemberss" ]; then + echo ${var}_$i$j$suffix$ext ": "$nameMembers "dimension has " $nMemb " values instead of " $nMembers | tee -a ktemp + else + ok3=1 + fi + + if [ -z $nLead ]; then + echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLeadtimes " inside this file " | tee -a ktemp + elif [ ${nLead//[[:blank:]]/} -ne $nLeadtimes ]; then + echo ${var}_$i$j$suffix$ext ": "$nameLeadtimes " dimension has " $nLead " values instead of " $nLeadtimes | tee -a ktemp + else + ok4=1 + fi + + + if [ $ok1 -eq 1 ] && [ $ok2 -eq 1 ] && [ $ok3 -eq 1 ] && [ $ok4 -eq 1 ]; then + echo ${var}_$i$j$suffix$ext ": ok" | tee -a ktemp + fi + fi +done +done + +column -t -s $'\t' ktemp > $outputfile +rm ktemp + +### other common changes to netCDF that can be useful: + +# rename dimensions/variables in all netCDF in the directory where the command is executed: +#for file in *; do ncrename -d .number,ensemble -d .lev,ensemble -v .number,realization -v .msl,psl $file; done +#for file in *; do ncrename -d .number,ensemble -d .lev,ensemble -v .number,realization -v .t2m,tas $file; done +#for file in *; do ncrename -d .reftime,time -d .sfc,ensemble -v .reftime,time -v .sfc,ensemble $file; done +#for file in *; do ncrename -d .lat,latitude -d .lon,longitude -v .lat,latitude -v .lon,longitude $file; done + +# # convert a 6-hourly file to a daily one: +# for file in *; do cdo daymean $file /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/$file; done + +# convert only February 6-hourly files to daily and update its variable names: +#for year in {1981..2015}; do cdo daymean sfcWind_${year}0201.nc /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/sfcWind_${year}0201.nc; ncrename -d .lev,ensemble -v .lev,ensemble /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/sfcWind_${year}0201.nc; done + +# cdo showdate + +# PA command to convert to both daily and monthly netCDF: +# convert a .nc file of ECMWF S4 after downloading: +#file=_grib2netcdf-atls01-95e2cf....nc ; ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file ; date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) ; mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc ; cdo daymean 
/esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc + + + +#file=_grib2netcdf-atls01-95e2cf....nc +#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file +#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) +#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc +#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc +#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
diff --git a/old/backup/bash/copy_to_dropbox.sh b/old/backup/bash/copy_to_dropbox.sh new file mode 100644 index 0000000000000000000000000000000000000000..67ac486489aa731cd44b3c693f545abf4ccc3ff4 --- /dev/null +++ b/old/backup/bash/copy_to_dropbox.sh @@ -0,0 +1,8 @@ + +for lat in {30..72} +do + for lon in {350..359} {0..40} + do + cp WTs_ERAintDailyHighRes_1980-2015_lat_$lat.*_lon_$lon.* /home/Earth/ncortesi/Dropbox/data + done +done \ No newline at end of file diff --git a/old/backup/bash/fix_ncdata.sh b/old/backup/bash/fix_ncdata.sh new file mode 100644 index 0000000000000000000000000000000000000000..7434ea6aa6cf1b74b1882e861e72a940ee2ee966 --- /dev/null +++ b/old/backup/bash/fix_ncdata.sh @@ -0,0 +1,144 @@ +#!/bin/bash + +# 
Creation: 6/2016 +# Authors: Nicola Cortesi and Raul Marcos +# Aim: to do a quality control of the dimension variables inside all NetCDF data files in a directory. Note that this script cannot detect whether a file is corrupt. +# I/O: you only have to specify where the files you want to check are located, and the correct names and sizes of the dimensions inside them. +# The output is a text file created in the directory where this script is run, whose name starts with "check_", containing the results of the quality control. + +var=psl #sfcWind # name of the variable to check +dat=ECMWFS4 # dataset name +freq=6hourly # time step +#path=/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind/ # its path +path=/esnas/exp/ecmwf/system4_m1/6hourly/psl/ +#/esnas/exp/Meteofrance/seasonal/0001/s004/m001/6hourly/sfcWind Meteofrance S4 +#/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind ECMWF S4 + +suffix=01 # this suffix is usually present in all S4 netCDF files, right before the file extension +ext=.nc # file extension + +yearStart=1981 # time period to check inside the files +yearEnd=2015 + +nameLatitude=latitude # name of the latitude dimension to check in the file +nLatitude=181 #128 #181 #256 # number of latitude values to check + +nameLongitude=longitude # name of the longitude dimension to check in the file +nLongitude=360 #256 #360 #512 # number of longitude values to check + +nameMembers=ensemble # name of the dimension with the model members to check in the file +nMembers=15 #51 #15 # number of members to check in the file +nMemberss=51 # alternative number of ensemble members that is also accepted (set it equal to nMembers if there is none) + +nameLeadtimes=time # name of the dimension with the forecast time to check in the file +nLeadtimes=861 #216 # number of forecast times to check in the file (215 complete days * 4 six-hourly values + 1 for the last midnight) + +printf "\t\n" > ktemp +printf "Checked variable: \t $var\n" >> ktemp +printf "Path: \t $path\n" >> ktemp +printf "Extension: \t $ext\n" >> ktemp +printf "Checked period: \t $yearStart-$yearEnd\n" >> ktemp +printf "\t\n" >> ktemp +printf ">>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<<\n" >> ktemp +printf "\t\n" >> ktemp +printf "Latitude denomination: \t $nameLatitude\n" >> ktemp +printf "Number of latitude values: \t $nLatitude\n" >> ktemp +printf "Longitude denomination: \t $nameLongitude\n" >> ktemp +printf "Number of longitude values:\t $nLongitude\n" >> ktemp +printf "Ensemble denomination: \t $nameMembers\n" >> ktemp +printf "Number of ensemble values: \t $nMembers\n" >> ktemp +if [ $nMembers != $nMemberss ]; then +printf "Alternative number of ensemble values: \t $nMemberss\n" >> ktemp +fi +printf "Lead-time denomination: \t $nameLeadtimes\n" >> ktemp +printf "Number of lead-times: \t $nLeadtimes\n" >> ktemp + +printf "\n\n >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< \n\n" | tee -a ktemp + +outputfile=./check_${dat}_${var}_${freq}.txt + +# function that detects the size of a dimension variable in a netCDF file: +# +# usage: get_size myNetCDF.nc myDimensionName +# +function get_size { + ncks -m ${1} | grep -E -i ": ${2}, size =" | cut -d ' ' -f 7 | uniq +} + +# check file existence and dimension sizes: +for ((i=$yearStart;i<=$yearEnd;i++)) +do +for j in 01 02 03 04 05 06 07 08 09 10 11 12 +do + ok1=0; ok2=0; ok3=0; ok4=0 ; ok5=0 + + # echo $path${var}_$i$j$suffix$ext > ktemp + if [ !
+
+# check file existence:
+for ((i=$yearStart;i<=$yearEnd;i++))
+do
+for j in 01 02 03 04 05 06 07 08 09 10 11 12
+do
+ ok1=0; ok2=0; ok3=0; ok4=0
+
+ # echo $path${var}_$i$j$suffix$ext > ktemp
+ if [ ! -f $path${var}_$i$j$suffix$ext ]
+ then
+ echo ${var}_$i$j$suffix$ext ": >>>>> Missing file <<<<<" | tee -a ktemp
+ else
+ nMemb=`get_size $path${var}_$i$j$suffix$ext $nameMembers`
+ nLead=`get_size $path${var}_$i$j$suffix$ext $nameLeadtimes`
+ nLat=`get_size $path${var}_$i$j$suffix$ext $nameLatitude`
+ nLon=`get_size $path${var}_$i$j$suffix$ext $nameLongitude`
+
+ if [ -z $nLat ]; then # check if nLat is empty
+ echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLatitude " inside this file " | tee -a ktemp
+ elif [ ${nLat//[[:blank:]]/} -ne $nLatitude ]; then
+ echo ${var}_$i$j$suffix$ext ": " $nameLatitude " dimension has " $nLat " values instead of " $nLatitude | tee -a ktemp
+ else
+ ok1=1
+ fi
+
+ if [ -z $nLon ]; then # check if nLon is empty
+ echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLongitude " inside this file " | tee -a ktemp
+ elif [ ${nLon//[[:blank:]]/} -ne $nLongitude ]; then
+ echo ${var}_$i$j$suffix$ext ": " $nameLongitude " dimension has " $nLon " values instead of " $nLongitude | tee -a ktemp
+ else
+ ok2=1
+ fi
+
+ if [ -z $nMemb ]; then # check if nMemb is empty
+ echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameMembers " inside this file " | tee -a ktemp
+ elif [ ${nMemb//[[:blank:]]/} -ne $nMembers -a ${nMemb//[[:blank:]]/} -ne "$nMemberss" ]; then
+ echo ${var}_$i$j$suffix$ext ": " $nameMembers " dimension has " $nMemb " values instead of " $nMembers | tee -a ktemp
+ else
+ ok3=1
+ fi
+
+ if [ -z $nLead ]; then
+ echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLeadtimes " inside this file " | tee -a ktemp
+ elif [ ${nLead//[[:blank:]]/} -ne $nLeadtimes ]; then
+ echo ${var}_$i$j$suffix$ext ": " $nameLeadtimes " dimension has " $nLead " values instead of " $nLeadtimes | tee -a ktemp
+ else
+ ok4=1
+ fi
+
+
+ if [ $ok1 -eq 1 ] && [ $ok2 -eq 1 ] && [ $ok3 -eq 1 ] && [ $ok4 -eq 1 ]; then
+ echo ${var}_$i$j$suffix$ext ": ok" | tee -a ktemp
+ fi
+ fi
+done
+done
+
+column -t -s $'\t' ktemp > $outputfile
+rm ktemp
+
+### other common changes to netCDF that can be useful:
+# # convert a 6-hourly file to a daily one:
+# for file in *; do cdo daymean $file /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/$file ; done
+
+# cdo daymean
+# cdo showdate
+# for file in *; do ncrename -d $file ; done
+
+# convert a .nc file of ECMWF S4 after downloading:
+#file=_grib2netcdf-atls01-95e2cf....nc ; ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file ; date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) ; mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc ; cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+#file=_grib2netcdf-atls01-95e2cf....nc
+#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file
+#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g)
+#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc
+#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc
+#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
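+
+# note on the one-liner above (standard NCO behaviour, as documented upstream): the
+# leading dot in "ncrename -v .msl,psl" marks the old name as optional, so ncrename
+# renames the variable/dimension when it exists and continues without error when it
+# does not, which is what makes the same command safe to run on heterogeneous files.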
diff --git a/old/backup/bash/logo_llorens_v3.sh b/old/backup/bash/logo_llorens_v3.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8a3f7179d567d323aa115ae3ea601d88ecb7ef8b
--- /dev/null
+++ b/old/backup/bash/logo_llorens_v3.sh
@@ -0,0 +1,306 @@
+#!/usr/bin/sh
+
+#-------------------
+# Usage info
+#-------------------
+show_help() {
+ cat << EOF
+Usage: ${0##*/} [-hl] [-t "title"] [-c "caption"] INFILE OUTFILE
+Add caption, title and logo to an image, for inclusion in the catalog.
+OPTIONS:
+ -h Display this help and exit
+ -t "title" Add a title on top of the image
+ -c "caption" Add a caption below the image
+ -l Add the BSC logo to the bottom right of the image
+EOF
+}
+
+#-------------------
+# Initialize flags
+#-------------------
+
+titsize=25 # size of the title. It is automatically rescaled if the size of the figure increases/decreases; i.e. if the figure doubles, the title size doubles too.
+captionsize=12 # size of the caption. It is automatically rescaled too.
+resizelogo=25 # width of the logo compared to the width of the image (in %). 25% is a good balance.
+
+cut_title=true # you can also cut a horizontal strip at the top of the image, if you want to remove an old title before adding the new one with option -t
+cut_title_pixels=50 # set the height of the horizontal strip (in pixels) to remove, if cut_title=true.
+
+OPTIND=1
+LOGO=false
+LOGO_file=/home/Earth/ncortesi/logo1.png
+
+#-------------------
+# Parse input options
+#-------------------
+while getopts ":t:c:lh" opt; do
+ case $opt in
+ h)
+ show_help
+ exit 0
+ ;;
+ t)
+ echo "-t was triggered, Title: $OPTARG" >&2
+ TITLE=$OPTARG
+ ;;
+ c)
+ echo "-c was triggered, Caption: $OPTARG" >&2
+ CAPTION=$OPTARG
+ ;;
+ l)
+ echo "-l was triggered, Include BSC logo" >&2
+ LOGO=true
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ :)
+ echo "Option -$OPTARG requires an argument." >&2
+ exit 1
+ ;;
+ esac
+done
+shift "$((OPTIND-1))" # Shift off the options
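+
+# Example invocation (illustrative; the file names are made up):
+#
+#   ./logo_llorens_v3.sh -l -t "Wind speed bias" -c "ECMWF S4, 1981-2015" map.ps map.png
+#
+# adds the BSC logo, a title and a caption to map.ps and writes the result to map.png.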
+
+#-------------------
+# Check we have two args remaining
+#-------------------
+if [[ $# -ne 2 ]];
+then
+ show_help
+ exit 0
+fi
+
+#-------------------
+# Get in and out files
+#-------------------
+INFILE=$1
+OUTFILE=$2
+
+#-------------------
+# Check infile exists
+#-------------------
+if [[ ! -f $INFILE ]]
+then
+ echo "File not found: $INFILE" >&2
+ exit 1
+fi
+
+####################
+# Start doing some work
+####################
+
+#------------------------------
+# Convert ps to png with 300dpi
+#------------------------------
+if [[ $INFILE == *.ps ]]
+then
+ convert -units PixelsPerInch -density 300 -background white -flatten $INFILE crop.png
+else
+ cp $INFILE crop.png
+fi
+
+# get the width of the image in pixels:
+width_figure=$(identify -ping -format %w crop.png)
+
+# get the height of the image in pixels:
+height_figure=$(identify -ping -format %h crop.png)
+
+#-------------------
+# Add caption
+#-------------------
+if [[ -v CAPTION ]]
+then
+
+ #-------------------------------------
+ # In case the logo has to be drawn too
+ #-------------------------------------
+ if ( $LOGO )
+ then
+ width_logo_area=$(( $width_figure * $resizelogo / 100 ))
+ width_caption_area=$(( $width_figure - $width_logo_area ))
+
+ width_caption=$(( $width_caption_area * 90 / 100))
+ width_logo=$(( $width_logo_area * 90 / 100 ))
+ width_logo_file=$(identify -ping -format %w $LOGO_file)
+
+ # 1) create the caption:
+ captionsize_rescaled=$(( $captionsize * $width_figure / 600))
+ convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 crop1.png
+
+ # 2) insert the caption at the top center of the caption area:
+ convert crop1.png -gravity North -background white -extent ${width_caption_area}x crop2.png
+
+ # 3) resize the logo, to be proportional to the size of the figure:
+ resize_logo=$(( $width_logo * 100 / $width_logo_file ))
+ convert -background white $LOGO_file -scale ${resize_logo}% crop3.png
+
+ # 4) insert the logo over a larger canvas (the logo area), in its top left part:
+ height_caption_area=$(identify -ping -format %h crop2.png)
+ height_logo_resized=$(identify -ping -format %h crop3.png)
+
+ if [[ $height_caption_area -lt $height_logo_resized ]]; then
+ height_logo_area=$height_logo_resized
+ else
+ height_logo_area=$(identify -ping -format %h crop1.png)
+ fi
+
+ convert crop3.png -gravity NorthWest -background white -extent ${width_logo_area}x${height_logo_area} crop4.png
+
+ # if the caption is shorter than the logo, insert the caption at the center of the caption area instead of at the top center:
+ if [[ $height_caption_area -lt $height_logo_resized ]]; then
+ convert crop1.png -gravity Center -background white -extent ${width_caption_area}x${height_logo_area} crop2.png
+ fi
+
+ # 5) Merge the caption area and the logo area together:
+ montage crop2.png crop4.png -tile 2x1 -geometry +0+0 crop5.png
+
+ # 6) Add a white horizontal strip over the caption+logo area:
+ convert -size x20 xc:white crop6.png
+ montage crop6.png crop5.png -tile 1x2 -geometry +0+0 crop7.png
+
+ # 7) Add a white horizontal strip below the caption+logo area:
+ if [[ $height_caption_area -ge $height_logo_resized ]]; then
+ convert -size x20 xc:white crop8.png
+ montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+ else
+ convert -size x5 xc:white crop8.png
+ montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+ fi
+
+ # 8) Add the figure:
+ montage crop.png crop9.png -tile 1x2 -geometry +0+0 crop10.png
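+
+ # Note on the ImageMagick conventions used in steps 1-8 (standard -geometry
+ # semantics): an extent such as ${width_caption_area}x fixes only the width and
+ # keeps the current height, "montage -tile 2x1" places two images side by side
+ # and "-tile 1x2" stacks them vertically, so the final layout is roughly:
+ #
+ #   +---------------------------+
+ #   |           figure          |
+ #   +--------------------+------+
+ #   |       caption      | logo |
+ #   +--------------------+------+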
+
+ else
+ #-------------------------
+ # In case there is no logo
+ #-------------------------
+
+ width_caption=$(( $width_figure * 90 / 100))
+
+ # 1) Create the caption:
+ captionsize_rescaled=$(( $captionsize * $width_figure / 600))
+ convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 crop1.png
+
+ # 2) Insert the caption at the top center of the caption area:
+ convert crop1.png -gravity North -background white -extent ${width_figure}x crop2.png
+
+ # 3) Add a white horizontal strip over the caption area:
+ convert -size x20 xc:white crop3.png
+ montage crop3.png crop2.png -tile 1x2 -geometry +0+0 crop7.png
+
+ # 4) Add a white horizontal strip below the caption area:
+ convert -size x20 xc:white crop8.png
+ montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+
+ # 5) Add the figure:
+ montage crop.png crop9.png -tile 1x2 -geometry +0+0 crop10.png
+
+ # xdg-open crop10.png # for the debug
+
+ fi # end if on LOGO
+else
+ #----------------------------
+ # In case there is no caption
+ #----------------------------
+
+ CAPTION=""
+
+ if ( $LOGO )
+ then
+ #-------------------------------------
+ # In case the logo has to be drawn too
+ #-------------------------------------
+
+ width_logo_area=$(( $width_figure * $resizelogo / 100 ))
+ width_caption_area=$(( $width_figure - $width_logo_area ))
+
+ width_caption=$(( $width_caption_area * 90 / 100))
+ width_logo=$(( $width_logo_area * 90 / 100 ))
+ width_logo_file=$(identify -ping -format %w $LOGO_file)
+
+ # 1) create the caption:
+ captionsize_rescaled=$(( $captionsize * $width_figure / 600))
+ convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 crop1.png
+
+ # 2) insert the caption at the top center of the caption area:
+ convert crop1.png -gravity North -background white -extent ${width_caption_area}x crop2.png
+
+ # 3) resize the logo, to be proportional to the size of the figure:
+ resize_logo=$(( $width_logo * 100 / $width_logo_file ))
+ convert -background white $LOGO_file -scale ${resize_logo}% crop3.png
+
+ # 4) insert the logo over a larger canvas (the logo area), in its top left part:
+ height_caption_area=$(identify -ping -format %h crop2.png)
+ height_logo_resized=$(identify -ping -format %h crop3.png)
+
+ if [[ $height_caption_area -lt $height_logo_resized ]]; then
+ height_logo_area=$height_logo_resized
+ else
+ height_logo_area=$(identify -ping -format %h crop1.png)
+ fi
+
+ convert crop3.png -gravity NorthWest -background white -extent ${width_logo_area}x${height_logo_area} crop4.png
+
+ # if the caption is shorter than the logo, insert the caption at the center of the caption area instead of at the top center:
+ if [[ $height_caption_area -lt $height_logo_resized ]]; then
+ convert crop1.png -gravity Center -background white -extent ${width_caption_area}x${height_logo_area} crop2.png
+ fi
+
+ # 5) Merge the caption area and the logo area together:
+ montage crop2.png crop4.png -tile 2x1 -geometry +0+0 crop5.png
+
+ # 6) Add a white horizontal strip over the caption+logo area:
+ convert -size x20 xc:white crop6.png
+ montage crop6.png crop5.png -tile 1x2 -geometry +0+0 crop7.png
+
+ # 7) Add a white horizontal strip below the caption+logo area:
+ if [[ $height_caption_area -ge $height_logo_resized ]]; then
+ convert -size x20 xc:white crop8.png
+ montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+ else
+ convert -size x5 xc:white crop8.png
+ montage crop7.png crop8.png -tile 1x2 -geometry +0+0 crop9.png
+ fi
+
+ # 8) Add the figure:
+ montage crop.png crop9.png -tile 1x2 -geometry +0+0 crop10.png
+
+ else
+ #---------------------------------------------------------
+ # In case there is no caption and no logo, only the title
+ #---------------------------------------------------------
+ cp crop.png crop10.png
+
+ fi # end if on LOGO
+
+fi # end if on CAPTION
+
+
+#------------------------------------------
+# Cut old title
+#------------------------------------------
+
+if ( $cut_title ); then
+ # cut the upper part of the image of the desired size to remove the old title:
+ convert crop10.png -crop +0+${cut_title_pixels} +repage crop10.png
+fi
+
+#------------------------------------------
+# Add title
+#------------------------------------------
+
+if [[ -v TITLE ]]
+then
+ # add the new title:
+ titsize_rescaled=$(( $titsize * $width_figure / 600))
+ convert crop10.png -background white -pointsize $titsize_rescaled label:"$TITLE" +swap -gravity Center -append crop11.png
+ cp crop11.png crop10.png
+fi
+
+
+cp crop10.png $OUTFILE
+
+# remove all temporary files:
+rm crop*.*
diff --git a/old/backup/bash/old/check_psl.out b/old/backup/bash/old/check_psl.out
new file mode 100644
index 0000000000000000000000000000000000000000..0f3a0a602a2695fbb2e15ecf88cad3df17d1bc97
--- /dev/null
+++ b/old/backup/bash/old/check_psl.out
@@ -0,0 +1,455 @@
+psl_19810101.nc : has 21 leadtimes instead of 216
+psl_19810201.nc : has 51 members instead of 15
+psl_19810301.nc : ok
+psl_19810401.nc : ok
+psl_19810501.nc : there is no variable called number inside this file
+psl_19810501.nc : has 49 leadtimes instead of 216
+psl_19810601.nc : ok
+psl_19810701.nc : ok
+psl_19810801.nc : has 51 members instead of 15
+psl_19810901.nc : ok
+psl_19811001.nc : ok
+psl_19811101.nc : there is no variable called number inside this file
+psl_19811201.nc : ok
+psl_19820101.nc : ok
+psl_19820201.nc : has 51 members instead of 15
+psl_19820301.nc : ok
+psl_19820401.nc : ok
+psl_19820501.nc : there is no variable called number inside this file
+psl_19820601.nc : ok
+psl_19820701.nc : ok
+psl_19820801.nc : has 51 members instead of 15
+psl_19820901.nc : ok
+psl_19821001.nc : ok
+psl_19821101.nc : there is no variable called number inside this file
+psl_19821201.nc : there is no variable called number inside this file
+psl_19830101.nc : ok
+psl_19830201.nc : >>> missing file <<<
+psl_19830301.nc : ok
+psl_19830401.nc : ok
+psl_19830501.nc : there is no variable called number inside this file
+psl_19830501.nc : has 176 leadtimes instead of 216
+psl_19830601.nc : ok
+psl_19830701.nc : ok
+psl_19830801.nc : has 51 members instead of 15
+psl_19830901.nc : ok
+psl_19831001.nc : ok
+psl_19831101.nc : there is no variable called number inside this file
+psl_19831201.nc : >>> missing file <<<
+psl_19840101.nc : ok
+psl_19840201.nc : has 51 members instead of 15
+psl_19840301.nc : ok
+psl_19840401.nc : ok
+psl_19840501.nc : there is no variable called number inside this file
+psl_19840601.nc : ok
+psl_19840701.nc : ok
+psl_19840801.nc : has 51 members instead of 15
+psl_19840901.nc : ok
+psl_19841001.nc : ok
+psl_19841101.nc : there is no variable called number inside this file
+psl_19841201.nc : there is no variable called number inside this file
+psl_19850101.nc : ok
+psl_19850201.nc : has 51 members instead of 15
+psl_19850301.nc : ok
+psl_19850401.nc : ok
+psl_19850501.nc : there is no variable called number inside this file
+psl_19850501.nc : has 861 leadtimes instead of 216
+psl_19850601.nc : ok
+psl_19850701.nc : ok
+psl_19850801.nc : has 51 members instead of 15
+psl_19850901.nc : ok
+psl_19851001.nc : ok
+psl_19851101.nc : there is no
variable called number inside this file +psl_19851201.nc : ok +psl_19860101.nc : ok +psl_19860201.nc : has 51 members instead of 15 +psl_19860301.nc : ok +psl_19860401.nc : ok +psl_19860501.nc : there is no variable called number inside this file +psl_19860601.nc : ok +psl_19860701.nc : ok +psl_19860801.nc : has 51 members instead of 15 +psl_19860901.nc : ok +psl_19861001.nc : ok +psl_19861101.nc : has 51 members instead of 15 +psl_19861201.nc : ok +psl_19870101.nc : ok +psl_19870201.nc : has 51 members instead of 15 +psl_19870301.nc : ok +psl_19870401.nc : ok +psl_19870501.nc : there is no variable called number inside this file +psl_19870601.nc : ok +psl_19870701.nc : ok +psl_19870801.nc : has 51 members instead of 15 +psl_19870901.nc : ok +psl_19871001.nc : ok +psl_19871101.nc : has 51 members instead of 15 +psl_19871201.nc : ok +psl_19880101.nc : ok +psl_19880201.nc : has 51 members instead of 15 +psl_19880301.nc : ok +psl_19880401.nc : ok +psl_19880501.nc : there is no variable called number inside this file +psl_19880501.nc : has 198 leadtimes instead of 216 +psl_19880601.nc : ok +psl_19880701.nc : ok +psl_19880801.nc : has 51 members instead of 15 +psl_19880901.nc : ok +psl_19881001.nc : ok +psl_19881101.nc : there is no variable called number inside this file +psl_19881201.nc : ok +psl_19890101.nc : ok +psl_19890201.nc : has 51 members instead of 15 +psl_19890301.nc : ok +psl_19890401.nc : ok +psl_19890501.nc : there is no variable called number inside this file +psl_19890601.nc : ok +psl_19890701.nc : ok +psl_19890801.nc : has 51 members instead of 15 +psl_19890901.nc : ok +psl_19891001.nc : ok +psl_19891101.nc : there is no variable called number inside this file +psl_19891201.nc : ok +psl_19900101.nc : ok +psl_19900201.nc : has 51 members instead of 15 +psl_19900301.nc : ok +psl_19900401.nc : ok +psl_19900501.nc : there is no variable called number inside this file +psl_19900501.nc : has 4 leadtimes instead of 216 +psl_19900601.nc : ok +psl_19900701.nc : ok +psl_19900801.nc : has 51 members instead of 15 +psl_19900901.nc : ok +psl_19901001.nc : ok +psl_19901101.nc : there is no variable called number inside this file +psl_19901201.nc : ok +psl_19910101.nc : ok +psl_19910201.nc : has 51 members instead of 15 +psl_19910301.nc : ok +psl_19910401.nc : ok +psl_19910501.nc : there is no variable called number inside this file +psl_19910601.nc : there is no variable called latitude inside this file +psl_19910601.nc : there is no variable called longitude inside this file +psl_19910601.nc : there is no variable called number inside this file +psl_19910601.nc : has 3240 leadtimes instead of 216 +psl_19910701.nc : there is no variable called latitude inside this file +psl_19910701.nc : there is no variable called longitude inside this file +psl_19910701.nc : there is no variable called number inside this file +psl_19910701.nc : has 3240 leadtimes instead of 216 +psl_19910801.nc : there is no variable called latitude inside this file +psl_19910801.nc : there is no variable called longitude inside this file +psl_19910801.nc : there is no variable called number inside this file +psl_19910801.nc : has 9628 leadtimes instead of 216 +psl_19910901.nc : there is no variable called latitude inside this file +psl_19910901.nc : there is no variable called longitude inside this file +psl_19910901.nc : there is no variable called number inside this file +psl_19910901.nc : has 2183 leadtimes instead of 216 +psl_19911001.nc : there is no variable called latitude inside this file +psl_19911001.nc : there 
is no variable called longitude inside this file +psl_19911001.nc : there is no variable called number inside this file +psl_19911001.nc : has 802 leadtimes instead of 216 +psl_19911101.nc : there is no variable called latitude inside this file +psl_19911101.nc : there is no variable called longitude inside this file +psl_19911101.nc : there is no variable called number inside this file +psl_19911101.nc : has 1000 leadtimes instead of 216 +psl_19911201.nc : there is no variable called latitude inside this file +psl_19911201.nc : there is no variable called longitude inside this file +psl_19911201.nc : there is no variable called number inside this file +psl_19911201.nc : has 951 leadtimes instead of 216 +psl_19920101.nc : ok +psl_19920201.nc : there is no variable called latitude inside this file +psl_19920201.nc : there is no variable called longitude inside this file +psl_19920201.nc : there is no variable called number inside this file +psl_19920201.nc : has 748 leadtimes instead of 216 +psl_19920301.nc : there is no variable called latitude inside this file +psl_19920301.nc : there is no variable called longitude inside this file +psl_19920301.nc : there is no variable called number inside this file +psl_19920301.nc : has 1693 leadtimes instead of 216 +psl_19920401.nc : there is no variable called latitude inside this file +psl_19920401.nc : there is no variable called longitude inside this file +psl_19920401.nc : there is no variable called number inside this file +psl_19920401.nc : has 705 leadtimes instead of 216 +psl_19920501.nc : there is no variable called number inside this file +psl_19920601.nc : ok +psl_19920701.nc : ok +psl_19920801.nc : has 51 members instead of 15 +psl_19920901.nc : ok +psl_19921001.nc : ok +psl_19921101.nc : there is no variable called number inside this file +psl_19921101.nc : has 56 leadtimes instead of 216 +psl_19921201.nc : ok +psl_19930101.nc : ok +psl_19930201.nc : has 51 members instead of 15 +psl_19930301.nc : ok +psl_19930401.nc : ok +psl_19930501.nc : there is no variable called number inside this file +psl_19930501.nc : has 2 leadtimes instead of 216 +psl_19930601.nc : ok +psl_19930701.nc : ok +psl_19930801.nc : has 51 members instead of 15 +psl_19930901.nc : ok +psl_19931001.nc : ok +psl_19931101.nc : there is no variable called number inside this file +psl_19931101.nc : has 65 leadtimes instead of 216 +psl_19931201.nc : ok +psl_19940101.nc : ok +psl_19940201.nc : has 51 members instead of 15 +psl_19940301.nc : ok +psl_19940401.nc : ok +psl_19940501.nc : there is no variable called number inside this file +psl_19940601.nc : ok +psl_19940701.nc : ok +psl_19940801.nc : has 51 members instead of 15 +psl_19940901.nc : has 130 leadtimes instead of 216 +psl_19941001.nc : ok +psl_19941101.nc : there is no variable called number inside this file +psl_19941101.nc : has 51 leadtimes instead of 216 +psl_19941201.nc : ok +psl_19950101.nc : ok +psl_19950201.nc : has 51 members instead of 15 +psl_19950301.nc : ok +psl_19950401.nc : ok +psl_19950501.nc : there is no variable called number inside this file +psl_19950601.nc : ok +psl_19950701.nc : ok +psl_19950801.nc : has 51 members instead of 15 +psl_19950901.nc : ok +psl_19951001.nc : ok +psl_19951101.nc : there is no variable called number inside this file +psl_19951201.nc : ok +psl_19960101.nc : ok +psl_19960201.nc : has 51 members instead of 15 +psl_19960301.nc : ok +psl_19960401.nc : ok +psl_19960501.nc : there is no variable called number inside this file +psl_19960601.nc : ok +psl_19960701.nc : ok 
+psl_19960801.nc : has 51 members instead of 15 +psl_19960901.nc : ok +psl_19961001.nc : there is no variable called number inside this file +psl_19961001.nc : has 109 leadtimes instead of 216 +psl_19961101.nc : there is no variable called number inside this file +psl_19961201.nc : ok +psl_19970101.nc : ok +psl_19970201.nc : has 51 members instead of 15 +psl_19970301.nc : ok +psl_19970401.nc : ok +psl_19970501.nc : has 51 members instead of 15 +psl_19970501.nc : has 180 leadtimes instead of 216 +psl_19970601.nc : ok +psl_19970701.nc : ok +psl_19970801.nc : has 51 members instead of 15 +psl_19970901.nc : ok +psl_19971001.nc : ok +psl_19971101.nc : there is no variable called number inside this file +psl_19971101.nc : has 49 leadtimes instead of 216 +psl_19971201.nc : has 142 leadtimes instead of 216 +psl_19980101.nc : ok +psl_19980201.nc : has 51 members instead of 15 +psl_19980301.nc : ok +psl_19980401.nc : ok +psl_19980501.nc : there is no variable called number inside this file +psl_19980501.nc : has 44 leadtimes instead of 216 +psl_19980601.nc : ok +psl_19980701.nc : ok +psl_19980801.nc : has 51 members instead of 15 +psl_19980901.nc : ok +psl_19981001.nc : ok +psl_19981101.nc : there is no variable called number inside this file +psl_19981201.nc : ok +psl_19990101.nc : ok +psl_19990201.nc : has 51 members instead of 15 +psl_19990301.nc : ok +psl_19990401.nc : ok +psl_19990501.nc : there is no variable called number inside this file +psl_19990501.nc : has 10 leadtimes instead of 216 +psl_19990601.nc : ok +psl_19990701.nc : ok +psl_19990801.nc : has 51 members instead of 15 +psl_19990901.nc : ok +psl_19991001.nc : ok +psl_19991101.nc : there is no variable called number inside this file +psl_19991201.nc : ok +psl_20000101.nc : ok +psl_20000201.nc : has 51 members instead of 15 +psl_20000301.nc : ok +psl_20000401.nc : ok +psl_20000501.nc : there is no variable called number inside this file +psl_20000501.nc : has 105 leadtimes instead of 216 +psl_20000601.nc : has 56 leadtimes instead of 216 +psl_20000701.nc : ok +psl_20000801.nc : has 51 members instead of 15 +psl_20000901.nc : ok +psl_20001001.nc : ok +psl_20001101.nc : there is no variable called number inside this file +psl_20001201.nc : ok +psl_20010101.nc : ok +psl_20010201.nc : has 51 members instead of 15 +psl_20010301.nc : ok +psl_20010401.nc : ok +psl_20010501.nc : there is no variable called number inside this file +psl_20010601.nc : ok +psl_20010701.nc : ok +psl_20010801.nc : has 51 members instead of 15 +psl_20010901.nc : ok +psl_20011001.nc : ok +psl_20011101.nc : there is no variable called number inside this file +psl_20011101.nc : has 10 leadtimes instead of 216 +psl_20011201.nc : ok +psl_20020101.nc : ok +psl_20020201.nc : has 51 members instead of 15 +psl_20020301.nc : ok +psl_20020401.nc : ok +psl_20020501.nc : there is no variable called number inside this file +psl_20020501.nc : has 153 leadtimes instead of 216 +psl_20020601.nc : ok +psl_20020701.nc : ok +psl_20020801.nc : has 51 members instead of 15 +psl_20020901.nc : ok +psl_20021001.nc : ok +psl_20021101.nc : there is no variable called number inside this file +psl_20021101.nc : has 163 leadtimes instead of 216 +psl_20021201.nc : ok +psl_20030101.nc : ok +psl_20030201.nc : has 51 members instead of 15 +psl_20030301.nc : ok +psl_20030401.nc : ok +psl_20030501.nc : there is no variable called number inside this file +psl_20030601.nc : ok +psl_20030701.nc : ok +psl_20030801.nc : has 51 members instead of 15 +psl_20030901.nc : ok +psl_20031001.nc : ok 
+psl_20031101.nc : there is no variable called number inside this file +psl_20031201.nc : ok +psl_20040101.nc : ok +psl_20040201.nc : has 51 members instead of 15 +psl_20040301.nc : ok +psl_20040401.nc : ok +psl_20040501.nc : there is no variable called number inside this file +psl_20040501.nc : has 7 leadtimes instead of 216 +psl_20040601.nc : has 28 leadtimes instead of 216 +psl_20040701.nc : ok +psl_20040801.nc : has 51 members instead of 15 +psl_20040901.nc : ok +psl_20041001.nc : ok +psl_20041101.nc : there is no variable called number inside this file +psl_20041201.nc : ok +psl_20050101.nc : ok +psl_20050201.nc : has 51 members instead of 15 +psl_20050301.nc : ok +psl_20050401.nc : ok +psl_20050501.nc : there is no variable called number inside this file +psl_20050601.nc : has 186 leadtimes instead of 216 +psl_20050701.nc : ok +psl_20050801.nc : has 51 members instead of 15 +psl_20050901.nc : ok +psl_20051001.nc : has 31 leadtimes instead of 216 +psl_20051101.nc : there is no variable called number inside this file +psl_20051101.nc : has 23 leadtimes instead of 216 +psl_20051201.nc : ok +psl_20060101.nc : ok +psl_20060201.nc : has 51 members instead of 15 +psl_20060301.nc : ok +psl_20060401.nc : has 40 leadtimes instead of 216 +psl_20060501.nc : there is no variable called number inside this file +psl_20060601.nc : ok +psl_20060701.nc : ok +psl_20060801.nc : has 51 members instead of 15 +psl_20060901.nc : ok +psl_20061001.nc : ok +psl_20061101.nc : there is no variable called number inside this file +psl_20061201.nc : ok +psl_20070101.nc : ok +psl_20070201.nc : has 51 members instead of 15 +psl_20070301.nc : ok +psl_20070401.nc : ok +psl_20070501.nc : there is no variable called number inside this file +psl_20070601.nc : ok +psl_20070701.nc : ok +psl_20070801.nc : has 51 members instead of 15 +psl_20070901.nc : ok +psl_20071001.nc : ok +psl_20071101.nc : there is no variable called number inside this file +psl_20071201.nc : ok +psl_20080101.nc : ok +psl_20080201.nc : has 51 members instead of 15 +psl_20080301.nc : ok +psl_20080401.nc : ok +psl_20080501.nc : there is no variable called number inside this file +psl_20080601.nc : ok +psl_20080701.nc : has 166 leadtimes instead of 216 +psl_20080801.nc : has 51 members instead of 15 +psl_20080901.nc : ok +psl_20081001.nc : ok +psl_20081101.nc : there is no variable called number inside this file +psl_20081201.nc : ok +psl_20090101.nc : ok +psl_20090201.nc : has 51 members instead of 15 +psl_20090301.nc : has 46 leadtimes instead of 216 +psl_20090401.nc : has 3 leadtimes instead of 216 +psl_20090501.nc : there is no variable called number inside this file +psl_20090601.nc : ok +psl_20090701.nc : ok +psl_20090801.nc : has 51 members instead of 15 +psl_20090901.nc : ok +psl_20091001.nc : ok +psl_20091101.nc : there is no variable called number inside this file +psl_20091201.nc : ok +psl_20100101.nc : ok +psl_20100201.nc : has 51 members instead of 15 +psl_20100301.nc : ok +psl_20100401.nc : ok +psl_20100501.nc : there is no variable called number inside this file +psl_20100601.nc : has 86 leadtimes instead of 216 +psl_20100701.nc : ok +psl_20100801.nc : has 51 members instead of 15 +psl_20100901.nc : ok +psl_20101001.nc : ok +psl_20101101.nc : there is no variable called number inside this file +psl_20101201.nc : there is no variable called number inside this file +psl_20110101.nc : ok +psl_20110201.nc : has 51 members instead of 15 +psl_20110301.nc : ok +psl_20110401.nc : ok +psl_20110501.nc : there is no variable called number inside this 
file +psl_20110601.nc : has 51 members instead of 15 +psl_20110601.nc : has 46 leadtimes instead of 216 +psl_20110701.nc : has 51 members instead of 15 +psl_20110701.nc : has 100 leadtimes instead of 216 +psl_20110801.nc : has 51 members instead of 15 +psl_20110801.nc : has 214 leadtimes instead of 216 +psl_20110901.nc : has 51 members instead of 15 +psl_20111001.nc : has 51 members instead of 15 +psl_20111101.nc : there is no variable called number inside this file +psl_20111201.nc : has 51 members instead of 15 +psl_20120101.nc : has 51 members instead of 15 +psl_20120201.nc : has 51 members instead of 15 +psl_20120201.nc : has 165 leadtimes instead of 216 +psl_20120301.nc : has 51 members instead of 15 +psl_20120401.nc : has 51 members instead of 15 +psl_20120501.nc : there is no variable called number inside this file +psl_20120601.nc : has 51 members instead of 15 +psl_20120601.nc : has 125 leadtimes instead of 216 +psl_20120701.nc : has 51 members instead of 15 +psl_20120801.nc : has 51 members instead of 15 +psl_20120901.nc : has 51 members instead of 15 +psl_20121001.nc : has 51 members instead of 15 +psl_20121001.nc : has 213 leadtimes instead of 216 +psl_20121101.nc : there is no variable called number inside this file +psl_20121201.nc : has 51 members instead of 15 +psl_20130101.nc : has 51 members instead of 15 +psl_20130201.nc : has 51 members instead of 15 +psl_20130301.nc : has 51 members instead of 15 +psl_20130301.nc : has 142 leadtimes instead of 216 +psl_20130401.nc : has 51 members instead of 15 +psl_20130501.nc : there is no variable called number inside this file +psl_20130601.nc : has 51 members instead of 15 +psl_20130601.nc : has 205 leadtimes instead of 216 +psl_20130701.nc : has 51 members instead of 15 +psl_20130801.nc : has 51 members instead of 15 +psl_20130901.nc : has 51 members instead of 15 +psl_20131001.nc : has 51 members instead of 15 +psl_20131101.nc : there is no variable called number inside this file +psl_20131101.nc : has 2 leadtimes instead of 216 +psl_20131201.nc : has 51 members instead of 15 diff --git a/old/backup/bash/old/check_sfcWind.out b/old/backup/bash/old/check_sfcWind.out new file mode 100644 index 0000000000000000000000000000000000000000..3093a1599af7fc1969002917f7831cd3e2ff29b2 --- /dev/null +++ b/old/backup/bash/old/check_sfcWind.out @@ -0,0 +1,399 @@ +sfcWind_19810101.nc : >>> missing file <<< +sfcWind_19810201.nc : >>> missing file <<< +sfcWind_19810301.nc : >>> missing file <<< +sfcWind_19810401.nc : >>> missing file <<< +sfcWind_19810501.nc : there is no variable called time inside this file +sfcWind_19810601.nc : >>> missing file <<< +sfcWind_19810701.nc : >>> missing file <<< +sfcWind_19810801.nc : there is no variable called latitude inside this file +sfcWind_19810801.nc : there is no variable called longitude inside this file +sfcWind_19810801.nc : there is no variable called lev inside this file +sfcWind_19810801.nc : has 1 leadtimes instead of 214 +sfcWind_19810901.nc : >>> missing file <<< +sfcWind_19811001.nc : >>> missing file <<< +sfcWind_19811101.nc : ok +sfcWind_19811201.nc : >>> missing file <<< +sfcWind_19820101.nc : >>> missing file <<< +sfcWind_19820201.nc : >>> missing file <<< +sfcWind_19820301.nc : >>> missing file <<< +sfcWind_19820401.nc : >>> missing file <<< +sfcWind_19820501.nc : >>> missing file <<< +sfcWind_19820601.nc : >>> missing file <<< +sfcWind_19820701.nc : >>> missing file <<< +sfcWind_19820801.nc : >>> missing file <<< +sfcWind_19820901.nc : >>> missing file <<< +sfcWind_19821001.nc : >>> 
missing file <<< +sfcWind_19821101.nc : ok +sfcWind_19821201.nc : >>> missing file <<< +sfcWind_19830101.nc : >>> missing file <<< +sfcWind_19830201.nc : >>> missing file <<< +sfcWind_19830301.nc : >>> missing file <<< +sfcWind_19830401.nc : >>> missing file <<< +sfcWind_19830501.nc : >>> missing file <<< +sfcWind_19830601.nc : >>> missing file <<< +sfcWind_19830701.nc : >>> missing file <<< +sfcWind_19830801.nc : >>> missing file <<< +sfcWind_19830901.nc : >>> missing file <<< +sfcWind_19831001.nc : >>> missing file <<< +sfcWind_19831101.nc : ok +sfcWind_19831201.nc : >>> missing file <<< +sfcWind_19840101.nc : >>> missing file <<< +sfcWind_19840201.nc : >>> missing file <<< +sfcWind_19840301.nc : >>> missing file <<< +sfcWind_19840401.nc : >>> missing file <<< +sfcWind_19840501.nc : >>> missing file <<< +sfcWind_19840601.nc : >>> missing file <<< +sfcWind_19840701.nc : >>> missing file <<< +sfcWind_19840801.nc : >>> missing file <<< +sfcWind_19840901.nc : >>> missing file <<< +sfcWind_19841001.nc : >>> missing file <<< +sfcWind_19841101.nc : ok +sfcWind_19841201.nc : >>> missing file <<< +sfcWind_19850101.nc : >>> missing file <<< +sfcWind_19850201.nc : >>> missing file <<< +sfcWind_19850301.nc : >>> missing file <<< +sfcWind_19850401.nc : >>> missing file <<< +sfcWind_19850501.nc : >>> missing file <<< +sfcWind_19850601.nc : >>> missing file <<< +sfcWind_19850701.nc : >>> missing file <<< +sfcWind_19850801.nc : >>> missing file <<< +sfcWind_19850901.nc : >>> missing file <<< +sfcWind_19851001.nc : >>> missing file <<< +sfcWind_19851101.nc : ok +sfcWind_19851201.nc : >>> missing file <<< +sfcWind_19860101.nc : >>> missing file <<< +sfcWind_19860201.nc : >>> missing file <<< +sfcWind_19860301.nc : >>> missing file <<< +sfcWind_19860401.nc : >>> missing file <<< +sfcWind_19860501.nc : >>> missing file <<< +sfcWind_19860601.nc : >>> missing file <<< +sfcWind_19860701.nc : >>> missing file <<< +sfcWind_19860801.nc : >>> missing file <<< +sfcWind_19860901.nc : >>> missing file <<< +sfcWind_19861001.nc : >>> missing file <<< +sfcWind_19861101.nc : ok +sfcWind_19861201.nc : >>> missing file <<< +sfcWind_19870101.nc : >>> missing file <<< +sfcWind_19870201.nc : >>> missing file <<< +sfcWind_19870301.nc : >>> missing file <<< +sfcWind_19870401.nc : >>> missing file <<< +sfcWind_19870501.nc : >>> missing file <<< +sfcWind_19870601.nc : >>> missing file <<< +sfcWind_19870701.nc : >>> missing file <<< +sfcWind_19870801.nc : >>> missing file <<< +sfcWind_19870901.nc : >>> missing file <<< +sfcWind_19871001.nc : >>> missing file <<< +sfcWind_19871101.nc : ok +sfcWind_19871201.nc : >>> missing file <<< +sfcWind_19880101.nc : >>> missing file <<< +sfcWind_19880201.nc : >>> missing file <<< +sfcWind_19880301.nc : >>> missing file <<< +sfcWind_19880401.nc : >>> missing file <<< +sfcWind_19880501.nc : >>> missing file <<< +sfcWind_19880601.nc : >>> missing file <<< +sfcWind_19880701.nc : >>> missing file <<< +sfcWind_19880801.nc : >>> missing file <<< +sfcWind_19880901.nc : >>> missing file <<< +sfcWind_19881001.nc : >>> missing file <<< +sfcWind_19881101.nc : ok +sfcWind_19881201.nc : >>> missing file <<< +sfcWind_19890101.nc : >>> missing file <<< +sfcWind_19890201.nc : >>> missing file <<< +sfcWind_19890301.nc : >>> missing file <<< +sfcWind_19890401.nc : >>> missing file <<< +sfcWind_19890501.nc : >>> missing file <<< +sfcWind_19890601.nc : >>> missing file <<< +sfcWind_19890701.nc : >>> missing file <<< +sfcWind_19890801.nc : >>> missing file <<< +sfcWind_19890901.nc : >>> missing file <<< 
+sfcWind_19891001.nc : >>> missing file <<< +sfcWind_19891101.nc : ok +sfcWind_19891201.nc : >>> missing file <<< +sfcWind_19900101.nc : >>> missing file <<< +sfcWind_19900201.nc : >>> missing file <<< +sfcWind_19900301.nc : >>> missing file <<< +sfcWind_19900401.nc : >>> missing file <<< +sfcWind_19900501.nc : >>> missing file <<< +sfcWind_19900601.nc : >>> missing file <<< +sfcWind_19900701.nc : >>> missing file <<< +sfcWind_19900801.nc : >>> missing file <<< +sfcWind_19900901.nc : >>> missing file <<< +sfcWind_19901001.nc : >>> missing file <<< +sfcWind_19901101.nc : ok +sfcWind_19901201.nc : >>> missing file <<< +sfcWind_19910101.nc : >>> missing file <<< +sfcWind_19910201.nc : >>> missing file <<< +sfcWind_19910301.nc : >>> missing file <<< +sfcWind_19910401.nc : >>> missing file <<< +sfcWind_19910501.nc : >>> missing file <<< +sfcWind_19910601.nc : >>> missing file <<< +sfcWind_19910701.nc : >>> missing file <<< +sfcWind_19910801.nc : >>> missing file <<< +sfcWind_19910901.nc : >>> missing file <<< +sfcWind_19911001.nc : >>> missing file <<< +sfcWind_19911101.nc : ok +sfcWind_19911201.nc : >>> missing file <<< +sfcWind_19920101.nc : >>> missing file <<< +sfcWind_19920201.nc : >>> missing file <<< +sfcWind_19920301.nc : >>> missing file <<< +sfcWind_19920401.nc : >>> missing file <<< +sfcWind_19920501.nc : >>> missing file <<< +sfcWind_19920601.nc : >>> missing file <<< +sfcWind_19920701.nc : >>> missing file <<< +sfcWind_19920801.nc : >>> missing file <<< +sfcWind_19920901.nc : >>> missing file <<< +sfcWind_19921001.nc : >>> missing file <<< +sfcWind_19921101.nc : ok +sfcWind_19921201.nc : >>> missing file <<< +sfcWind_19930101.nc : >>> missing file <<< +sfcWind_19930201.nc : >>> missing file <<< +sfcWind_19930301.nc : >>> missing file <<< +sfcWind_19930401.nc : >>> missing file <<< +sfcWind_19930501.nc : >>> missing file <<< +sfcWind_19930601.nc : >>> missing file <<< +sfcWind_19930701.nc : >>> missing file <<< +sfcWind_19930801.nc : >>> missing file <<< +sfcWind_19930901.nc : >>> missing file <<< +sfcWind_19931001.nc : >>> missing file <<< +sfcWind_19931101.nc : ok +sfcWind_19931201.nc : >>> missing file <<< +sfcWind_19940101.nc : >>> missing file <<< +sfcWind_19940201.nc : >>> missing file <<< +sfcWind_19940301.nc : >>> missing file <<< +sfcWind_19940401.nc : >>> missing file <<< +sfcWind_19940501.nc : >>> missing file <<< +sfcWind_19940601.nc : >>> missing file <<< +sfcWind_19940701.nc : >>> missing file <<< +sfcWind_19940801.nc : >>> missing file <<< +sfcWind_19940901.nc : >>> missing file <<< +sfcWind_19941001.nc : >>> missing file <<< +sfcWind_19941101.nc : ok +sfcWind_19941201.nc : >>> missing file <<< +sfcWind_19950101.nc : >>> missing file <<< +sfcWind_19950201.nc : >>> missing file <<< +sfcWind_19950301.nc : >>> missing file <<< +sfcWind_19950401.nc : >>> missing file <<< +sfcWind_19950501.nc : >>> missing file <<< +sfcWind_19950601.nc : >>> missing file <<< +sfcWind_19950701.nc : >>> missing file <<< +sfcWind_19950801.nc : >>> missing file <<< +sfcWind_19950901.nc : >>> missing file <<< +sfcWind_19951001.nc : >>> missing file <<< +sfcWind_19951101.nc : ok +sfcWind_19951201.nc : >>> missing file <<< +sfcWind_19960101.nc : >>> missing file <<< +sfcWind_19960201.nc : >>> missing file <<< +sfcWind_19960301.nc : >>> missing file <<< +sfcWind_19960401.nc : >>> missing file <<< +sfcWind_19960501.nc : >>> missing file <<< +sfcWind_19960601.nc : >>> missing file <<< +sfcWind_19960701.nc : >>> missing file <<< +sfcWind_19960801.nc : >>> missing file <<< +sfcWind_19960901.nc : >>> 
missing file <<< +sfcWind_19961001.nc : >>> missing file <<< +sfcWind_19961101.nc : ok +sfcWind_19961201.nc : >>> missing file <<< +sfcWind_19970101.nc : >>> missing file <<< +sfcWind_19970201.nc : >>> missing file <<< +sfcWind_19970301.nc : >>> missing file <<< +sfcWind_19970401.nc : >>> missing file <<< +sfcWind_19970501.nc : >>> missing file <<< +sfcWind_19970601.nc : >>> missing file <<< +sfcWind_19970701.nc : >>> missing file <<< +sfcWind_19970801.nc : >>> missing file <<< +sfcWind_19970901.nc : >>> missing file <<< +sfcWind_19971001.nc : >>> missing file <<< +sfcWind_19971101.nc : ok +sfcWind_19971201.nc : >>> missing file <<< +sfcWind_19980101.nc : >>> missing file <<< +sfcWind_19980201.nc : >>> missing file <<< +sfcWind_19980301.nc : >>> missing file <<< +sfcWind_19980401.nc : >>> missing file <<< +sfcWind_19980501.nc : >>> missing file <<< +sfcWind_19980601.nc : >>> missing file <<< +sfcWind_19980701.nc : >>> missing file <<< +sfcWind_19980801.nc : >>> missing file <<< +sfcWind_19980901.nc : >>> missing file <<< +sfcWind_19981001.nc : >>> missing file <<< +sfcWind_19981101.nc : ok +sfcWind_19981201.nc : >>> missing file <<< +sfcWind_19990101.nc : >>> missing file <<< +sfcWind_19990201.nc : >>> missing file <<< +sfcWind_19990301.nc : >>> missing file <<< +sfcWind_19990401.nc : >>> missing file <<< +sfcWind_19990501.nc : >>> missing file <<< +sfcWind_19990601.nc : >>> missing file <<< +sfcWind_19990701.nc : >>> missing file <<< +sfcWind_19990801.nc : >>> missing file <<< +sfcWind_19990901.nc : >>> missing file <<< +sfcWind_19991001.nc : >>> missing file <<< +sfcWind_19991101.nc : ok +sfcWind_19991201.nc : >>> missing file <<< +sfcWind_20000101.nc : >>> missing file <<< +sfcWind_20000201.nc : >>> missing file <<< +sfcWind_20000301.nc : >>> missing file <<< +sfcWind_20000401.nc : >>> missing file <<< +sfcWind_20000501.nc : >>> missing file <<< +sfcWind_20000601.nc : >>> missing file <<< +sfcWind_20000701.nc : >>> missing file <<< +sfcWind_20000801.nc : >>> missing file <<< +sfcWind_20000901.nc : >>> missing file <<< +sfcWind_20001001.nc : >>> missing file <<< +sfcWind_20001101.nc : ok +sfcWind_20001201.nc : >>> missing file <<< +sfcWind_20010101.nc : >>> missing file <<< +sfcWind_20010201.nc : >>> missing file <<< +sfcWind_20010301.nc : >>> missing file <<< +sfcWind_20010401.nc : >>> missing file <<< +sfcWind_20010501.nc : >>> missing file <<< +sfcWind_20010601.nc : >>> missing file <<< +sfcWind_20010701.nc : >>> missing file <<< +sfcWind_20010801.nc : >>> missing file <<< +sfcWind_20010901.nc : >>> missing file <<< +sfcWind_20011001.nc : >>> missing file <<< +sfcWind_20011101.nc : ok +sfcWind_20011201.nc : >>> missing file <<< +sfcWind_20020101.nc : >>> missing file <<< +sfcWind_20020201.nc : >>> missing file <<< +sfcWind_20020301.nc : >>> missing file <<< +sfcWind_20020401.nc : >>> missing file <<< +sfcWind_20020501.nc : >>> missing file <<< +sfcWind_20020601.nc : >>> missing file <<< +sfcWind_20020701.nc : >>> missing file <<< +sfcWind_20020801.nc : >>> missing file <<< +sfcWind_20020901.nc : >>> missing file <<< +sfcWind_20021001.nc : >>> missing file <<< +sfcWind_20021101.nc : ok +sfcWind_20021201.nc : >>> missing file <<< +sfcWind_20030101.nc : >>> missing file <<< +sfcWind_20030201.nc : >>> missing file <<< +sfcWind_20030301.nc : >>> missing file <<< +sfcWind_20030401.nc : >>> missing file <<< +sfcWind_20030501.nc : >>> missing file <<< +sfcWind_20030601.nc : >>> missing file <<< +sfcWind_20030701.nc : >>> missing file <<< +sfcWind_20030801.nc : >>> missing file <<< 
+sfcWind_20030901.nc : >>> missing file <<< +sfcWind_20031001.nc : >>> missing file <<< +sfcWind_20031101.nc : ok +sfcWind_20031201.nc : >>> missing file <<< +sfcWind_20040101.nc : >>> missing file <<< +sfcWind_20040201.nc : >>> missing file <<< +sfcWind_20040301.nc : >>> missing file <<< +sfcWind_20040401.nc : >>> missing file <<< +sfcWind_20040501.nc : >>> missing file <<< +sfcWind_20040601.nc : >>> missing file <<< +sfcWind_20040701.nc : >>> missing file <<< +sfcWind_20040801.nc : >>> missing file <<< +sfcWind_20040901.nc : >>> missing file <<< +sfcWind_20041001.nc : >>> missing file <<< +sfcWind_20041101.nc : ok +sfcWind_20041201.nc : >>> missing file <<< +sfcWind_20050101.nc : >>> missing file <<< +sfcWind_20050201.nc : >>> missing file <<< +sfcWind_20050301.nc : >>> missing file <<< +sfcWind_20050401.nc : >>> missing file <<< +sfcWind_20050501.nc : >>> missing file <<< +sfcWind_20050601.nc : >>> missing file <<< +sfcWind_20050701.nc : >>> missing file <<< +sfcWind_20050801.nc : >>> missing file <<< +sfcWind_20050901.nc : >>> missing file <<< +sfcWind_20051001.nc : >>> missing file <<< +sfcWind_20051101.nc : ok +sfcWind_20051201.nc : >>> missing file <<< +sfcWind_20060101.nc : >>> missing file <<< +sfcWind_20060201.nc : >>> missing file <<< +sfcWind_20060301.nc : >>> missing file <<< +sfcWind_20060401.nc : >>> missing file <<< +sfcWind_20060501.nc : >>> missing file <<< +sfcWind_20060601.nc : >>> missing file <<< +sfcWind_20060701.nc : >>> missing file <<< +sfcWind_20060801.nc : >>> missing file <<< +sfcWind_20060901.nc : >>> missing file <<< +sfcWind_20061001.nc : >>> missing file <<< +sfcWind_20061101.nc : ok +sfcWind_20061201.nc : >>> missing file <<< +sfcWind_20070101.nc : >>> missing file <<< +sfcWind_20070201.nc : >>> missing file <<< +sfcWind_20070301.nc : >>> missing file <<< +sfcWind_20070401.nc : >>> missing file <<< +sfcWind_20070501.nc : >>> missing file <<< +sfcWind_20070601.nc : >>> missing file <<< +sfcWind_20070701.nc : >>> missing file <<< +sfcWind_20070801.nc : >>> missing file <<< +sfcWind_20070901.nc : >>> missing file <<< +sfcWind_20071001.nc : >>> missing file <<< +sfcWind_20071101.nc : ok +sfcWind_20071201.nc : >>> missing file <<< +sfcWind_20080101.nc : >>> missing file <<< +sfcWind_20080201.nc : >>> missing file <<< +sfcWind_20080301.nc : >>> missing file <<< +sfcWind_20080401.nc : >>> missing file <<< +sfcWind_20080501.nc : >>> missing file <<< +sfcWind_20080601.nc : >>> missing file <<< +sfcWind_20080701.nc : >>> missing file <<< +sfcWind_20080801.nc : >>> missing file <<< +sfcWind_20080901.nc : >>> missing file <<< +sfcWind_20081001.nc : >>> missing file <<< +sfcWind_20081101.nc : ok +sfcWind_20081201.nc : >>> missing file <<< +sfcWind_20090101.nc : >>> missing file <<< +sfcWind_20090201.nc : >>> missing file <<< +sfcWind_20090301.nc : >>> missing file <<< +sfcWind_20090401.nc : >>> missing file <<< +sfcWind_20090501.nc : >>> missing file <<< +sfcWind_20090601.nc : >>> missing file <<< +sfcWind_20090701.nc : >>> missing file <<< +sfcWind_20090801.nc : >>> missing file <<< +sfcWind_20090901.nc : >>> missing file <<< +sfcWind_20091001.nc : >>> missing file <<< +sfcWind_20091101.nc : ok +sfcWind_20091201.nc : >>> missing file <<< +sfcWind_20100101.nc : >>> missing file <<< +sfcWind_20100201.nc : >>> missing file <<< +sfcWind_20100301.nc : >>> missing file <<< +sfcWind_20100401.nc : >>> missing file <<< +sfcWind_20100501.nc : >>> missing file <<< +sfcWind_20100601.nc : >>> missing file <<< +sfcWind_20100701.nc : >>> missing file <<< +sfcWind_20100801.nc : >>> 
missing file <<<
+sfcWind_20100901.nc : >>> missing file <<<
+sfcWind_20101001.nc : >>> missing file <<<
+sfcWind_20101101.nc : ok
+sfcWind_20101201.nc : >>> missing file <<<
+sfcWind_20110101.nc : >>> missing file <<<
+sfcWind_20110201.nc : >>> missing file <<<
+sfcWind_20110301.nc : >>> missing file <<<
+sfcWind_20110401.nc : >>> missing file <<<
+sfcWind_20110501.nc : >>> missing file <<<
+sfcWind_20110601.nc : >>> missing file <<<
+sfcWind_20110701.nc : >>> missing file <<<
+sfcWind_20110801.nc : >>> missing file <<<
+sfcWind_20110901.nc : >>> missing file <<<
+sfcWind_20111001.nc : >>> missing file <<<
+sfcWind_20111101.nc : ok
+sfcWind_20111201.nc : >>> missing file <<<
+sfcWind_20120101.nc : >>> missing file <<<
+sfcWind_20120201.nc : >>> missing file <<<
+sfcWind_20120301.nc : >>> missing file <<<
+sfcWind_20120401.nc : >>> missing file <<<
+sfcWind_20120501.nc : >>> missing file <<<
+sfcWind_20120601.nc : >>> missing file <<<
+sfcWind_20120701.nc : >>> missing file <<<
+sfcWind_20120801.nc : >>> missing file <<<
+sfcWind_20120901.nc : >>> missing file <<<
+sfcWind_20121001.nc : >>> missing file <<<
+sfcWind_20121101.nc : ok
+sfcWind_20121201.nc : >>> missing file <<<
+sfcWind_20130101.nc : >>> missing file <<<
+sfcWind_20130201.nc : >>> missing file <<<
+sfcWind_20130301.nc : >>> missing file <<<
+sfcWind_20130401.nc : >>> missing file <<<
+sfcWind_20130501.nc : >>> missing file <<<
+sfcWind_20130601.nc : >>> missing file <<<
+sfcWind_20130701.nc : >>> missing file <<<
+sfcWind_20130801.nc : >>> missing file <<<
+sfcWind_20130901.nc : >>> missing file <<<
+sfcWind_20131001.nc : >>> missing file <<<
+sfcWind_20131101.nc : has 212 leadtimes instead of 214
+sfcWind_20131201.nc : >>> missing file <<<
diff --git a/old/backup/bash/old/checking_S4.sh b/old/backup/bash/old/checking_S4.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3dfe39b539c6eacfa9957a9b8e669bc265c9d10d
--- /dev/null
+++ b/old/backup/bash/old/checking_S4.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+
+var=psl #sfcWind # name of the variable to check
+path=/esnas/exp/ecmwf/system4_m1/daily_mean/${var}_f6h/ # its path
+suffix=01 # this suffix is usually present in all S4 netCDF files, right before the file extension
+ext=.nc # file extension
+
+yearStart=1981 # time period to check inside the files
+yearEnd=2015
+
+nameLatitude=latitude # name of the latitude dimension to check in the file
+nLatitude=181 #256 #181 # number of latitude values to check
+
+nameLongitude=longitude # name of the longitude dimension to check in the file
+nLongitude=360 #512 #360 # number of longitude values to check
+
+nameMembers=ensemble #lev #number # name of the dimension with the model members to check in the file
+nMembers=15 #51 #15 # number of members to check in the file
+
+nameLeadtimes=time # name of the dimension with the forecast time to check in the file
+nLeadtimes=216 #214 #216 # number of forecast times to check in the file
+
+# function that detects the size of a dimension variable in a netCDF file:
+#
+# usage: get_size myNetCDF.nc myDimensionName
+#
+function get_size {
+  ncks -m ${1} | grep -E -i ": ${2}, size =" | cut -d ' ' -f 7 | uniq
+}
+
+# check file existence:
+for ((i=$yearStart;i<=$yearEnd;i++))
+do
+for j in 01 02 03 04 05 06 07 08 09 10 11 12
+do
+  ok1=0; ok2=0; ok3=0; ok4=0
+
+  #echo $path${var}_$i$j$suffix$ext
+  if [ ! -f $path${var}_$i$j$suffix$ext ]
+  then
+    echo ${var}_$i$j$suffix$ext ": >>>>> Missing file <<<<<"
+  else
+    nMemb=`get_size $path${var}_$i$j$suffix$ext $nameMembers`
+    nLead=`get_size $path${var}_$i$j$suffix$ext $nameLeadtimes`
+    nLat=`get_size $path${var}_$i$j$suffix$ext $nameLatitude`
+    nLon=`get_size $path${var}_$i$j$suffix$ext $nameLongitude`
+
+    if [ -z $nLat ]; then # check if nLat is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLatitude " inside this file "
+    elif [ ${nLat//[[:blank:]]/} -ne $nLatitude ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameLatitude " dimension has " $nLat " values instead of " $nLatitude
+    else
+      ok1=1
+    fi
+
+    if [ -z $nLon ]; then # check if nLon is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLongitude " inside this file "
+    elif [ ${nLon//[[:blank:]]/} -ne $nLongitude ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameLongitude " dimension has " $nLon " values instead of " $nLongitude
+    else
+      ok2=1
+    fi
+
+    if [ -z $nMemb ]; then # check if nMemb is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameMembers " inside this file "
+    elif [ ${nMemb//[[:blank:]]/} -ne $nMembers ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameMembers " dimension has " $nMemb " values instead of " $nMembers
+    else
+      ok3=1
+    fi
+
+    if [ -z $nLead ]; then
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLeadtimes " inside this file "
+    elif [ ${nLead//[[:blank:]]/} -ne $nLeadtimes ]; then
+      echo ${var}_$i$j$suffix$ext ": " $nameLeadtimes " dimension has " $nLead " values instead of " $nLeadtimes
+    else
+      ok4=1
+    fi
+
+    if [ $ok1 -eq 1 ] && [ $ok2 -eq 1 ] && [ $ok3 -eq 1 ] && [ $ok4 -eq 1 ]; then
+      echo ${var}_$i$j$suffix$ext ": ok"
+    fi
+  fi
+done
+done
+
+
+# # other common changes to netCDF:
+
+# for file in *; ncrename -d .
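+# How the get_size helper above works, step by step: `ncks -m` prints the file
+# metadata, grep keeps the line describing the requested dimension (any line
+# containing ": <dimension name>, size ="), `cut -d ' ' -f 7` extracts the
+# blank-separated field holding the size, and `uniq` collapses repeated
+# matches. A hypothetical call, for one of the files checked above:
+#
+#   get_size ${path}psl_198101${suffix}${ext} time   # prints e.g. 216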
+# # convert a 6-hourly file to a daily one:
+# cdo daymean
+
+# cdo showdate
+
+# convert a .nc file of ECMWF S4 after downloading:
+#file=_grib2netcdf-atls01-95e2cf....nc ; ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file ; date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) ; mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc ; cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+#file=_grib2netcdf-atls01-95e2cf....nc
+#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file
+#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g)
+#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc
+#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc
+#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+# psl_19810101.nc
+# psl_19810501.nc
+# psl_19830501.nc
+# psl_19850501.nc
+# psl_19880501.nc
+# psl_19900501.nc
+# psl_19910601.nc
+# psl_19910701.nc
+# psl_19910801.nc
+# psl_19910901.nc
+# psl_19911001.nc
+# psl_19911101.nc
+# psl_19911201.nc
+# psl_19920201.nc
+# psl_19920301.nc
+# psl_19920401.nc
+# psl_19921101.nc
+# psl_19930501.nc
+# psl_19931101.nc
+# psl_19940901.nc
+# psl_19941101.nc
+# psl_19961001.nc
+# psl_19970501.nc
+# psl_19971101.nc
+# psl_19971201.nc
+# psl_19980501.nc
+# psl_19990501.nc
+# psl_20000501.nc
+# psl_20000601.nc
+# psl_20011101.nc
+# psl_20020501.nc
+# psl_20021101.nc
+# psl_20050601.nc
+# psl_20051001.nc
+# psl_20051101.nc
diff --git a/old/backup/checking_ncdata.sh b/old/backup/checking_ncdata.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b539211e756e0dbec2989ea6c20d39185feeb7e8
--- /dev/null
+++ b/old/backup/checking_ncdata.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+
+# Creation: 6/2016
+# Authors: Nicola Cortesi and Raul Marcos
+# Aim: to do a quality control of the dimension variables inside all NetCDF data files in a directory. Notice that this script is not able to detect whether a file is corrupt.
+# I/O: you only have to specify where the files you want to check are located, and the correct names and sizes of the dimensions inside them.
+# The output is a text file, written to the directory where this script is run and with a name starting with "check_", which contains the results of the quality control.
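+#
+# Example (assumed invocation; the script takes no arguments, so edit the
+# variables below before running it):
+#
+#   ./checking_ncdata.sh
+#
+# With the settings below, the report is written to
+# ./check_ECMWFS4_sfcWind_daily.txt and a typical line of it looks like:
+#
+#   sfcWind_20131101.nc : time dimension has 212 values instead of 216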
+
+var=sfcWind #tas #sfcWind #psl # name of the variable to check
+dat=ECMWFS4 # dataset name
+freq=daily # time step (just for the output filename)
+
+path=/esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/
+#path=/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind/ # its path
+#path=/esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/
+#/esnas/exp/Meteofrance/seasonal/0001/s004/m001/6hourly/sfcWind Meteofrance S4
+#/esnas/exp/ECMWF/seasonal/0001/s004/m001/6hourly/sfcWind ECMWF S4
+
+suffix=01 # this suffix is usually present in all S4 netCDF files, right before the file extension
+ext=.nc # file extension
+
+yearStart=1981 # time period to check inside the files
+yearEnd=2015
+
+nameLatitude=latitude # name of the latitude dimension to check in the file
+nLatitude=181 #128 #181 #256 # number of latitude values to check
+
+nameLongitude=longitude # name of the longitude dimension to check in the file
+nLongitude=360 #256 #360 #512 # number of longitude values to check
+
+nameMembers=ensemble # name of the dimension with the model members to check in the file
+nMembers=15 #51 #15 # number of members to check in the file
+nMemberss=51 # an alternative number of ensemble members that is also accepted (set it to the same value as nMembers if there is none)
+
+nameLeadtimes=time # name of the dimension with the forecast time to check in the file
+nLeadtimes=216 #861 # number of forecast times to check in the file (215 complete days * 4 (6hourly) + 1 (for the last midnight))
+
+printf "\t\n" > ktemp
+printf "Checked variable: \t $var\n" >> ktemp
+printf "Path: \t $path\n" >> ktemp
+printf "Extension: \t $ext\n" >> ktemp
+printf "Checked period: \t $yearStart-$yearEnd\n" >> ktemp
+printf "\t\n" >> ktemp
+printf ">>>>>>>>>>>>>>>>>>>>>>>> Expected elements <<<<<<<<<<<<<<<<<<<<<<<<\n" >> ktemp
+printf "\t\n" >> ktemp
+printf "Latitude denomination: \t $nameLatitude\n" >> ktemp
+printf "Number of latitude values: \t $nLatitude\n" >> ktemp
+printf "Longitude denomination: \t $nameLongitude\n" >> ktemp
+printf "Number of longitude values:\t $nLongitude\n" >> ktemp
+printf "Ensemble denomination: \t $nameMembers\n" >> ktemp
+printf "Number of ensemble values: \t $nMembers\n" >> ktemp
+if [ $nMembers != $nMemberss ]; then
+printf "Alternative number of ensemble values: \t $nMemberss\n" >> ktemp
+fi
+printf "Lead-time denomination: \t $nameLeadtimes\n" >> ktemp
+printf "Number of lead-times: \t $nLeadtimes\n" >> ktemp
+
+printf "\n\n >>>>>>>>>>>>>>>>>>>>>>>> Checking <<<<<<<<<<<<<<<<<<<<<<<< \n\n" | tee -a ktemp
+
+outputfile=./check_${dat}_${var}_${freq}.txt
+
+# function that detects the size of a dimension variable in a netCDF file:
+#
+# usage: get_size myNetCDF.nc myDimensionName
+#
+function get_size {
+  ncks -m ${1} | grep -E -i ": ${2}, size =" | cut -d ' ' -f 7 | uniq
+}
+
+# check file existence:
+for ((i=$yearStart;i<=$yearEnd;i++))
+do
+for j in 01 02 03 04 05 06 07 08 09 10 11 12
+do
+  ok1=0; ok2=0; ok3=0; ok4=0 ; ok5=0
+
+  # echo $path${var}_$i$j$suffix$ext > ktemp
+  if [ ! -f $path${var}_$i$j$suffix$ext ]
+  then
+    echo ${var}_$i$j$suffix$ext ": >>>>> Missing file <<<<<" | tee -a ktemp
+  else
+    nMemb=`get_size $path${var}_$i$j$suffix$ext $nameMembers`
+    nLead=`get_size $path${var}_$i$j$suffix$ext $nameLeadtimes`
+    nLat=`get_size $path${var}_$i$j$suffix$ext $nameLatitude`
+    nLon=`get_size $path${var}_$i$j$suffix$ext $nameLongitude`
+
+    if [ -z $nLat ]; then # check if nLat is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLatitude " inside this file " | tee -a ktemp
+    elif [ ${nLat//[[:blank:]]/} -ne $nLatitude ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameLatitude" dimension has " $nLat " values instead of " $nLatitude | tee -a ktemp
+    else
+      ok1=1
+    fi
+
+    if [ -z $nLon ]; then # check if nLon is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLongitude " inside this file " | tee -a ktemp
+    elif [ ${nLon//[[:blank:]]/} -ne $nLongitude ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameLongitude" dimension has " $nLon " values instead of " $nLongitude | tee -a ktemp
+    else
+      ok2=1
+    fi
+
+    if [ -z $nMemb ]; then # check if nMemb is empty
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameMembers " inside this file " | tee -a ktemp
+    elif [ ${nMemb//[[:blank:]]/} -ne $nMembers -a ${nMemb//[[:blank:]]/} -ne "$nMemberss" ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameMembers" dimension has " $nMemb " values instead of " $nMembers | tee -a ktemp
+    else
+      ok3=1
+    fi
+
+    if [ -z $nLead ]; then
+      echo ${var}_$i$j$suffix$ext ": there is no dimension called " $nameLeadtimes " inside this file " | tee -a ktemp
+    elif [ ${nLead//[[:blank:]]/} -ne $nLeadtimes ]; then
+      echo ${var}_$i$j$suffix$ext ": "$nameLeadtimes" dimension has " $nLead " values instead of " $nLeadtimes | tee -a ktemp
+    else
+      ok4=1
+    fi
+
+    if [ $ok1 -eq 1 ] && [ $ok2 -eq 1 ] && [ $ok3 -eq 1 ] && [ $ok4 -eq 1 ]; then
+      echo ${var}_$i$j$suffix$ext ": ok" | tee -a ktemp
+    fi
+  fi
+done
+done
+
+column -t -s $'\t' ktemp > $outputfile
+rm ktemp
+
+### other common changes to netCDF that can be useful:
+
+# rename dimensions/variables in all netCDF in the directory where the command is executed:
+#for file in *; do ncrename -d .number,ensemble -d .lev,ensemble -v .number,realization -v .msl,psl $file; done
+#for file in *; do ncrename -d .number,ensemble -d .lev,ensemble -v .number,realization -v .t2m,tas $file; done
+#for file in *; do ncrename -d .reftime,time -d .sfc,ensemble -v .reftime,time -v .sfc,ensemble $file; done
+#for file in *; do ncrename -d .lat,latitude -d .lon,longitude -v .lat,latitude -v .lon,longitude $file; done
+
+# # convert a 6-hourly file to a daily one:
+# for file in *; do cdo daymean $file /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/$file; done
+
+# convert only February 6-hourly files to daily and update their variable names:
+#for year in {1981..2015}; do cdo daymean sfcWind_${year}0201.nc /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/sfcWind_${year}0201.nc; ncrename -d .lev,ensemble -v .lev,ensemble /esnas/exp/ecmwf/system4_m1/daily_mean/sfcWind_f6h/sfcWind_${year}0201.nc; done
+
+# cdo showdate
+
+# PA command to convert to both daily and monthly netCDF:
+# convert a .nc file of ECMWF S4 after downloading:
+#file=_grib2netcdf-atls01-95e2cf....nc ; ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file ; date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g) ; mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc ; cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc ; cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
+
+
+
+#file=_grib2netcdf-atls01-95e2cf....nc
+#ncrename -v .msl,psl -d .number,ensemble -v .number,realization $file
+#date=$(cdo showdate $file | cut -f3 -d" " | sed -e s/-//g)
+#mv $file /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc
+#cdo daymean /esnas/exp/ecmwf/system4_m1/6hourly/psl/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc
+#cdo monmean /esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_${date}.nc /esnas/exp/ecmwf/system4_m1/monthly_mean/psl_f6h/psl_${date}.nc
diff --git a/old/backup/dem/elevation_512x256.nc b/old/backup/dem/elevation_512x256.nc
new file mode 100755
index 0000000000000000000000000000000000000000..b8502be666df754544c463a4587f53adf178d0ab
Binary files /dev/null and b/old/backup/dem/elevation_512x256.nc differ
diff --git a/old/backup/diagnostic.R b/old/backup/diagnostic.R
new file mode 100644
index 0000000000000000000000000000000000000000..cb059535cd2f882de6fbcf0fee339b512da4e796
--- /dev/null
+++ b/old/backup/diagnostic.R
@@ -0,0 +1,77 @@
+##############################################################
+# In your script, before performing the analysis, the range  #
+# of values of its parameter(s) is specified somewhere       #
+# before the main analysis:                                  #
+##############################################################
+
+start_date <- 1:12
+lead_time <- 1:5
+
+...
+
+###############################################################
+# Insert these new lines to link your script with the job     #
+# file. They have to be inserted BEFORE the main loop where   #
+# you repeat the same analysis many times varying the         #
+# parameter(s) defined above (in this example, start_date and #
+# lead_time):                                                 #
+###############################################################
+
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) > 0) {
+  start_date <- script.arg[1]
+  lead_time <- script.arg[2]
+}
+
+###############################################################
+# In case your script is written in Python, you have to       #
+# introduce these lines instead:                              #
+###############################################################
+
+import sys
+script_arg = [int(a) for a in sys.argv[1:]]
+
+if len(script_arg) > 0 :
+    start_date = script_arg[0]
+    lead_time = script_arg[1]
+
+
+###############################################################
+# If your script is in Fortran, C or another language, you    #
+# need to adapt the syntax of the previous lines to the       #
+# language used.                                              #
+###############################################################
+
+###############################################################
+# Here you perform the main analysis (the one which takes     #
+# most of the computing time) many times, cycling over one    #
+# parameter (or a few parameters) inside this loop until the  #
+# analysis is finished. When you link this script to the job  #
+# file, you'll be able to split this analysis into multiple   #
+# ones, one for each different value of the looping           #
+# parameter(s), and to assign each value to a different job,  #
+# running all jobs (or as many as possible) on our cluster.   #
+###############################################################
+
+for(sd in start_date){
+  for(lt in lead_time){
+
+    # your analysis here
+    # ......
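+    # e.g., a minimal placeholder (hypothetical outputs, matching the save()
+    # call below), to be replaced by the real analysis:
+    # output_1 <- mean(rnorm(1000, mean = sd, sd = lt))
+    # output_2 <- sd * lt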
+ + + ########################################################### + # When you save the outputs of the analysis, save them in # + # one file for each different value of the looping # + # parameter(s), because if not, each job will overwrite # + # the results of the previous jobs! # + ########################################################### + + save(output_1, output_2, ..., output_N, file=paste0(work.dir, "/my_analysis_start_date_", sd, "_lead_time_", lt, ".RData")) + + } +} + + + diff --git a/old/backup/doo_young/PlotRD.dy.MME4.R b/old/backup/doo_young/PlotRD.dy.MME4.R new file mode 100644 index 0000000000000000000000000000000000000000..e883e4755b0787b1bda920e96a2745678815ae91 --- /dev/null +++ b/old/backup/doo_young/PlotRD.dy.MME4.R @@ -0,0 +1,153 @@ +PlotRD<-function(rel_diag,nbins=10,consbars=F,tit=NULL,colLine=NULL,colBar=NULL,marHist=T,hist_ylim=NULL,Lg=NULL) { + +print("Plot") +# rel_diag<-rd +# nbins=10 +# consbars=T +# colLine=col_line +# colBar=col_bar +# tit=tit1 +# marHist=T +# hist_ylim=c(0,100) +# x11(width=12,height=10) + # x11() + # PLOT OF THE RELIABILITY DIAGRAM + # + ###################################################################################### + # rd: a list with the reliability diagrams that will be represented in the same plot + # cons.bars : if the consistency bar must be represented or not. + # nbins : number of equidistant points used to compute the reliability diagram (optional) + # tit: the title of the plot (optional) + # brierScores: The brier score linked to the reliability diagram (optional) + # marHist: Whether to plot the small refinement histogram is showed + ##################################################################################### + + # Some parameters are defined + nrd<-length(rel_diag) # nrd = 5, 4 models + mme + rg<-list() + # Check the dimensions of the rank histogram + for (i in 1:nrd){ + if (dim(rel_diag[[i]])[1]!=nbins){ + stop ('The bins of the reliability diagram must be the same that nbins') + } + rg[[i]]<-range(rel_diag[[i]]$hist.counts)# check the range of the histograms + } + if (is.null(hist_ylim)){ + rgH<-range(rg) +#print(rgH) + }else{ + rgH<-hist_ylim + } + + + ########################################## + # reliability plot + # par(mar=c(5,3,2,2)+0.1) + ########################################## + + layout(matrix(c(rep(1,nrd),seq(2,(nrd+1))),nrd,2,byrow=F),width=c(5,2)) + par(oma=c(2.5,4,5,1)) + #layout.show(a) + + # The axis are defined + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + #x11(width=12,height=10) +# old.par <- par(no.readonly = TRUE) +#print(old.par) +# on.exit(par(old.par)) +# par(mar=c(5,5,5,0)) + old.par <- par(mar=c(5,5,5,0)) + on.exit(par(old.par)) + + plot(NULL, xlim = c(0,1), ylim = c(0,1),axes=F, xlab='', ylab='') + + axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=2.0) + title(xlab= "Forecast probability",line=3.9,cex.lab=2.0) + + axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), las=2,cex.axis=2.0) + #axis(2, at=seq(0,1,by=0.1), labels=seq(0,1,by=0.1), cex.axis=2.0) + box() + title(ylab= "Observed relative frequency", line=0.2,cex.lab=2.0,outer=T) + if(is.null(tit)==F){ +# title(tit,cex.main=4,outer=T,line=-1) +# title(tit,cex.main=2.0,outer=T,line=-4) + title(tit,cex.main=2.0,outer=T,line=-3) + } + + # Legend + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + yloc <- c(1.0, 0.95, 0.90, 0.85, 0.80) + legend(0.,yloc[1], legend=Lg[[1]], fill=colLine[[1]], bty="n", cex=1.2) + legend(0.,yloc[2], legend=Lg[[2]], fill=colLine[[2]], bty="n", cex=1.2) + legend(0.,yloc[3], legend=Lg[[3]], 
fill=colLine[[3]], bty="n", cex=1.2) + legend(0.,yloc[4], legend=Lg[[4]], fill=colLine[[4]], bty="n", cex=1.2) + legend(0.,yloc[5], legend=Lg[[5]], fill=colLine[[5]], bty="n", cex=1.2) +# legend("topleft", "(x,y)", pch = 1, title = "topleft, inset = .05", inset = .05) + + # No resolution and No skill lines + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + no_res <- sum(rel_diag[[1]]$obs.counts)/sum(rel_diag[[1]]$hist.counts) +# vt_res <- sum(rel_diag[[1]]$for.prob)/sum(rel_diag[[1]]$hist.counts) +#print(paste("no_res = ",no_res)) + numb <- c(seq(0,1,by=0.1)) +#print(numb) + no_skill <- (numb+no_res)/2. +#print(no_skill) + +# diagonal line + lines(c(0,1), c(0,1), lty=1) +# no_resolution line + lines(c(0,1), c(no_res,no_res), col="gray", lty=3) + lines(c(1/3,1/3), c(0,1), col="gray", lty=3) +# lines(c(vt_res,vt_res), c(0,1), col="gray", lty=3) +# lines(c(no_res,no_res), c(0,1), col="gray", lty=3) +# no_skill line + lines(c(0,1), c(no_skill[1],no_skill[11]), col="black", lty=3) + + + # Consistency bars + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + HI<-matrix(NA,nrow=nrd,ncol=length(rel_diag[[1]]$hist.counts)) + + for (j in 1:nrd){ # nrd = 5: 4 models + mme + HI[j,]<-rel_diag[[j]]$hist.counts + if (consbars==T){ + # The lower limit of consistency bar i and the upper limit are combined in one list + consBars<-list() + consBars[[j]]<-abind(InsertDim(rel_diag[[j]]$cbar.lo,1,1),InsertDim(rel_diag[[j]]$cbar.hi,1,1),along=1) + + # plot consistency bars + for (i in 1:nbins){ # nbins = 10 bins +# lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=3) + lines(rep(rel_diag[[j]]$p.avgs[i], 2), consBars[[j]][, i], col=colBar[j], lwd=2) # lwd: line width + } + } + +# see plot: "p" for points, "l" for lines, "b" for both points and lines, "c" for empty points joined by lines, "o" for overplotted points and lines, "s" and "S" for stair steps and "h" for histogram-like vertical lines. Finally, "n" does not produce any points or lines. 
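+# the reliability curve itself: mean forecast probability of each bin (p.avgs)
+# against the observed relative frequency (cond.probs); type="b" draws both
+# the points and the connecting lines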
+ points(rel_diag[[j]]$p.avgs, rel_diag[[j]]$cond.probs, type="b", pch=1 , col =colLine[[j]], cex=2.0 , lwd=3) + + } + + + # Number of forecasts + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if (marHist==TRUE){ + + for (i in 1:nrd){ +# par(mar=c(5,0,5,12)) +# par(mar=c(1.5,1.5,6,7)) # in case num of sharpness diagram is 3 or 4 + par(mar=c(0.5,1.5,5,7)) # in case num of sharpness diagram is 5 + barplot(HI[i,]/10000, beside=T,space=c(0,1.2),axes = F, axis.lty=F, axisnames = F, col = colLine[[i]], ylim=rgH/10000) +# axis(1, at=seq(0,1,by=0.1),labels=seq(0,1,by=0.1),cex.axis=1.5) + title(main = "# of forecasts (x10⁴)", font.main = 1.0, line=0.5) +# grid(1,5,col='#525252') + axis(4,cex.axis=1.0) + box(bg='grey') + } + #pp<- par("plt") + #par("plt" = c(pp[2] - 0.14 , pp[2], pp[3], pp[3]+ 0.15) ) + #par(new = TRUE) + } + +} + diff --git a/old/backup/doo_young/reliability_diagrams.dy_4models.fig.v2.MME4.R b/old/backup/doo_young/reliability_diagrams.dy_4models.fig.v2.MME4.R new file mode 100644 index 0000000000000000000000000000000000000000..a439a8784ebfa2549cc3dab8cbd78135300db02a --- /dev/null +++ b/old/backup/doo_young/reliability_diagrams.dy_4models.fig.v2.MME4.R @@ -0,0 +1,158 @@ +#clear workspace +rm(list=ls()) +gc() +#________________________________________________________________________________________________________ +# +# Reliability Diagram +#________________________________________________________________________________________________________ + +#load sources and libraries +library(s2dverification) +library(SpecsVerification) +library(ncdf) +library(statmod) +library(maps) +library(mapdata) +library(parallel) +library(doMC) +library(psych) +library(RColorBrewer) +library(MASS) +library(maptools) #for shapefiles +library(scales) #for transparency +library(abind) +library(TeachingDemos) +#source('~/R/scripts/functions_s2dverification/PlotEquiMapV.R') +source('../R/ReliabilityDiagramHist.R') +source('../R/PlotRD.dy.MME4.R') + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Select parameters +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +variable<-'sfcwind' # variable +var_name<-'10m Wind Speed' +#variable<-'tas' # variable +#var_name<-'2m Temperature' +s1<-1991 # initial start date +s2<-2013 # final start date +s3<-s2-1 # final start date of the hindcast +sea<-1 # number of season (in targ) +# Lead times +ltmin<-2 +ltmax<-4 +# Reanalysis +nrls <- 1 +ranl <- list('wERAint','wJRA55') # exp: data of experiments +rinp <- list('ERA_Int','JRA_55') # exp: data of experiments + +# Season: 1:DJF, 2:JJA, 3:MAM, 4:SON +sea<-4 +targ <- list('DJF','JJA','MAM','SON') # targ: forecast season +fsti <- list('November','May','February','August')# fcst: start month of the forecast + +# Regions +nrg <- 10 +rgn <- list('GL','NH','SH','NA','SA','AU','EA','EU','TR','AF') +region<-list("Globe","Northern H.","Southern H.","North Am.","South Am.","Australia","East Asia","Europe","Tropics","Africa") +area1<-c(0,360,-90,90) +area2<-c(0,360,20,90) +area3<-c(0,360,-90,-20) +area4<-c(190,310,10,75) +area5<-c(270,330,-60,10) +area6<-c(110,180,-50,0) +area7<-c(90,150,20,50) +area8<-c(-15,40,35,75) +area9<-c(0,360,-20,20) +area10<-c(-20,55,-35,40) +range <- list(area1,area2,area3,area4,area5,area6,area7,area8,area9,area10) +######################################################################### +for (js in 1:nrls){ # 2 Reanalysis +#for (js in 1){ +print(paste("Reanalysis = ", rinp[[js]])) + +for (is in 1:sea){ 
# 4 seasons +#for (is in 1){ +print(paste("season = ", targ[[is]])) + +for (ms in 1:nrg){ # 10 Regions +#for (ms in 1){ +lonlatbox=range[[ms]] +print(rgn[[ms]]) +######################################################################### +# Colours of the rank histogram +#col_line<-c('#137AE1',"#feb24c","#FF0000") +col_line<-c("#FF0000","#feb24c",'#137AE1','#01854d','#4D0185') + +## alpha => light(0) to dark(1) +#col_bar<-c('#C7E7F9','#F5EAC0','#F9C7D4') +col_bar<-c(adjustcolor(col_line[1],alpha=0.3),adjustcolor(col_line[2],alpha=0.5),adjustcolor(col_line[3],alpha=0.3),adjustcolor(col_line[4],alpha=0.3),adjustcolor(col_line[5],alpha=0.3)) +#col_bar<-c('#A2D5F7','#FACD7F','#F6A4A4') + +file<-list('RelDiag_rawdata','RelDiag_sbc_cross','RelDiag_sbc_nocross','RelDiag_cal_cross','RelDiag_cal_nocross') + Stitle<-c('Raw Data','Simple bias correction in cross-validation', 'Simple bias correction without cross-validation', + 'Calibration in cross-validation','Calibration without cross-validation') +#################################################################### + + dirfiles <-paste('/esarchive/oper/DATA/skill/Exp5/',variable,'/',ranl[[js]],'/rld/mask/',sep='') + outputdir<-paste('/esarchive/oper/DATA/fig/Exp5/',ranl[[js]],'/',variable,'/rld/mask_mme4/',targ[[is]],'/',sep='') + + fileobs <-paste(dirfiles,rinp[[js]],'_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='') + filepred1<-paste(dirfiles,'ECMWF_S4_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='') + filepred2<-paste(dirfiles,'METFR_S3_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='') + filepred3<-paste(dirfiles,'METFR_S4_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='') + filepred4<-paste(dirfiles,'METFR_S5_',variable,'_',targ[[is]],'_1lead_',s1,'_',s3,'_global_ProBins_',rgn[[ms]],'.RData',sep='') + + # data: observations and predictions + obs<-get(load(fileobs)) + pred1<-get(load(filepred1)) + pred2<-get(load(filepred2)) + pred3<-get(load(filepred3)) + pred4<-get(load(filepred4)) + predm<-pred1 + +####################################################### + for (i in (1:5)){ # post-processing datasets +# for (i in 5){ # post-processing datasets +print(paste("post-pro = ", i)) +#print(dim(obs[[i]])) # 3 100804 +#print(dim(pred[[i]])) # 3 100804 + +####################################################### + for (jc in (1:3)){ # 3 categories (jc=1 ->'below', jc=2 ->'near', jc=3 ->'above') +# for (jc in 1){ # 3 categories + predm[[i]][jc, ] <- (pred1[[i]][jc, ]+pred2[[i]][jc, ]+pred3[[i]][jc, ]+pred4[[i]][jc, ])/4 + model <- rbind(rbind(pred1[[i]][jc,],pred2[[i]][jc,]),pred3[[i]][jc,],pred4[[i]][jc,],predm[[i]][jc,]) +print(dim(pred1)); print(dim(predm)); print(dim(model)) + + categ<-list('Below','Near','Above') + Model_Name <- list('ECMWF_S4','METFR_S3','METFR_S4','METFR_S5','MME') + nthr<-dim(model)[1] + +# fileout<-paste(outputdir,i,'_',categ[jc],'_',file[[i]],'_',variable,'_',s1,'_',s3,'_',rgn[[ms]],'_no_topo.ps',sep='') + fileout<-paste(outputdir,i,'_',categ[jc],'_',file[[i]],'_',variable,'_',s1,'_',s3,'_',rgn[[ms]],'.ps',sep='') + cairo_ps(fileout,width=8,height=7) + title<-paste('Reliability Diagram. 
(',var_name,', ',categ[jc],') +1 month lead, ',targ[[is]],' (',s1,'-',s3,'), ',region[[ms]],' +',rinp[[js]],', ',Stitle[[i]],sep='') + + rd<-list() + for(j in 1:nthr){ # j = Loop of 4 models + MME , jc = 3 categories + rd[[j]]<-ReliabilityDiagramHist(model[j,],obs[[i]][jc,],plot=F,nboot=500) + } + +# For Consistency Bar, consbars = T, otherwise, consbars = F +# PlotRD(rd,nbins=10,consbars=T,colLine=col_line,colBar=col_bar,tit=title,hist_ylim=c(0,100)) + PlotRD(rd,nbins=10,consbars=T,colLine=col_line,colBar=col_bar,tit=title,Lg=Model_Name) + dev.off() + + } # 3 categories (i=1 ->'below', i=2 ->'near', i=3 ->'above') +####################################################### + + } # 1 raw data and 4 post-processing datasets +####################################################### + + } # 10 Regions + + } # 4 seasons + +} # 2 Reanalysis diff --git a/old/backup/fig2catalog.sh b/old/backup/fig2catalog.sh new file mode 100755 index 0000000000000000000000000000000000000000..e8a8d6dd8835010d1c92ff99eb1bff89e9d72e20 --- /dev/null +++ b/old/backup/fig2catalog.sh @@ -0,0 +1,293 @@ +#!/usr/bin/sh +#------------------- +# Usage info +#------------------- +show_help() { + cat << EOF +Usage: ${0##*/} [-hl] [-t "title"] [-c "caption"] [-s scale_factor] INFILE OUTFILE +Add caption, title and logo to an image, for including in the catalog. +OPTIONS: + -h Display this help and exit + -t "title" Add a title on top of the image + -r pixels Remove an horizontal strip at the top of the image + of height equal to the specified number of pixels, + in case there is a pre-existing title to delete + -p pixels Add a white horizontal strip between the title and the figure + -c "caption" Add a caption below the image + -m pixels Remove an horizontal strip between the image and the caption, + if the distance to the caption is too much + -x pixels Add a white horizontal strip between the image and the caption + -s scale_factor Rescale font size of both title and caption by this factor + -l Do not add the bsc logo +EOF +} + +#------------------- +# Initialize flags and settings +#------------------- + +captionsize=25 # size of the caption. It is automatically rescaled depending on the image width. +resizefont=1 # scale factor for both title and fonts + +OPTIND=1 # needed for getopts maths +LOGO=true # flag for logo exclusion +resizelogo=25 # width of the logo compared to the width of the image (in %). 25% is a good balance. 
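+# e.g. (illustrative numbers, not from the original script): with resizelogo=25,
+# a figure 2000 px wide gets a 2000*25/100 = 500 px wide logo area.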
+LOGO_file=/shared/earth/EarthSystemServices/BSC_logo/logo.png
+
+cut_title=false # cut a horizontal strip at the top of the figure, if you want to remove an old title before adding the new one with option -t
+cut_title_pixels=0 # set the height of the horizontal strip (in pixels) to remove, used when cut_title=true
+cut_bottom=false # cut a horizontal strip between the figure and the caption, if there is too much white space at the bottom of the figure
+cut_bottom_pixels=0
+
+add_strip_above_title=20 # space above the title in pixels
+add_strip_below_title=0 # space between title and original figure in pixels
+add_strip_above_caption=0 # space between original figure and caption in pixels
+add_strip_below_caption=20 # space below caption in pixels
+
+#-------------------
+# Parse input options
+#-------------------
+while getopts ":t:r:p:c:m:x:s:lh" opt; do
+  case $opt in
+    h)
+      show_help
+      exit 0
+      ;;
+    t)
+      #echo "-t was triggered, Title: $OPTARG" >&2
+      TITLE=$OPTARG
+      ;;
+    r)
+      #echo "-r was triggered, Title: $OPTARG" >&2
+      cut_title=true
+      cut_title_pixels=$OPTARG
+      ;;
+    p)
+      #echo "-p was triggered, Caption: $OPTARG" >&2
+      add_strip_below_title=$OPTARG
+      ;;
+    c)
+      #echo "-c was triggered, Caption: $OPTARG" >&2
+      CAPTION=$OPTARG
+      ;;
+    m)
+      #echo "-m was triggered, Title: $OPTARG" >&2
+      cut_bottom=true
+      cut_bottom_pixels=$OPTARG
+      ;;
+    x)
+      #echo "-x was triggered, Caption: $OPTARG" >&2
+      add_strip_above_caption=$OPTARG
+      ;;
+    l)
+      #echo "-l was triggered, exclude BSC logo" >&2
+      LOGO=false
+      ;;
+    s)
+      #echo "-s was triggered, scale factor: $OPTARG" >&2
+      resizefont=$OPTARG
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      exit 1
+      ;;
+    :)
+      echo "Option -$OPTARG requires an argument." >&2
+      exit 1
+      ;;
+  esac
+done
+shift "$((OPTIND-1))" # Shift off the options
+#-------------------
+# Check we have two args remaining
+#-------------------
+if [[ $# -ne 2 ]] ; then
+  show_help
+  exit 0
+fi
+#-------------------
+# Get in and out files
+#-------------------
+INFILE=$1
+OUTFILE=$2
+#-------------------
+# Check infile exists
+#-------------------
+if [[ ! -f $INFILE ]] ; then
+  echo "File not found: $INFILE" >&2
+  exit -1
+fi
+####################
+# Start doing some work
+####################
+#------------------------------
+# Create a tmp folder to store intermediate files.
+# It will be cleared by the system.
+#------------------------------
+TMP="$(mktemp -d)"
+#echo "Working in "$TMP" folder."
+#------------------------------
+# Convert ps to png with 300dpi
+#------------------------------
+if [[ $INFILE == *.ps ]] ; then
+  convert -units PixelsPerInch -density 300 -background white -flatten $INFILE $TMP/figure.png
+else
+  cp $INFILE $TMP/figure.png
+fi
+#------------------------------
+# Get the width and height of the image and logo, in pixels.
+#------------------------------
+width_figure=$(identify -ping -format %w $TMP/figure.png)
+height_figure=$(identify -ping -format %h $TMP/figure.png)
+width_logo_file=$(identify -ping -format %w $LOGO_file)
+#------------------------------
+# Set the width of logo and logo area.
+#------------------------------
+width_logo_area=$(( $width_figure * $resizelogo / 100 ))
+width_logo=$(( $width_logo_area * 90 / 100 ))
+resize_logo=$(( $width_logo * 100 / $width_logo_file ))
+#------------------------------
+# Set the width of caption and caption area.
+#------------------------------ +if ( $LOGO ) ; then + width_caption_area=$(( $width_figure - $width_logo_area )) +else + width_caption_area=$width_figure +fi +width_caption=$(( $width_caption_area * 90 / 100)) + +#------------------------------ +# Set rescaled sizes for fonts and other tricks +#------------------------------ +resizefont=$( echo "scale=3; $width_figure*$resizefont/125" | bc ) + +if [[ $width_figure -gt 1000 ]]; then + captionsize=$(( $captionsize - 2 )) + if [[ $width_figure -gt 1500 ]]; then + captionsize=$(( $captionsize - 3 )) + if [[ $width_figure -gt 2000 ]]; then + captionsize=$(( $captionsize - 2 )) + if [[ $width_figure -gt 3000 ]]; then + captionsize=$(( $captionsize - 3 )) + fi + fi + fi +fi + +captionsize_rescaled=$( echo "$captionsize*$resizefont/10" | bc ) +titsize_rescaled=$( echo "scale=3; $captionsize_rescaled*150/100" | bc ) # size of the title. It is automatically rescaled if the size of the figure increases/decreases; i.e: if the figure double, the title size doubles too. + +#------------------ +# We need to create a footer? +# (either caption or logo or both) +#------------------ +if [[ -v CAPTION ]] || ( $LOGO ) ; then + #--------------------- + # Create the caption and insert it at the top center of the caption area. + # NOTE: if caption is unset it writes an empty string anyway. + #--------------------- + convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 $TMP/caption.png + convert $TMP/caption.png -gravity North -background white -extent ${width_caption_area}x $TMP/caption_area.png + height_caption=$(identify -ping -format %h $TMP/caption.png) + height_caption_area=$(identify -ping -format %h $TMP/caption_area.png) + #-------------------------- + # Draw the logo if needed + #-------------------------- + if ( $LOGO ) ; then + #--------------------- + # Resize the logo, to be proportional to the size of the figure + #--------------------- + convert -background white $LOGO_file -scale ${resize_logo}% $TMP/logo.png + height_logo_resized=$(identify -ping -format %h $TMP/logo.png) + #--------------------- + # Set height of logo area. + # If the caption is less high than the logo, insert the caption at the center of + # the caption area rather than at the top center (This overwrites previous image). 
+ # In this case also set fewer space below footer + #--------------------- + if [[ $height_caption_area -lt $height_logo_resized ]]; then + height_logo_area=$height_logo_resized + convert $TMP/caption.png -gravity Center -background white -extent ${width_caption_area}x${height_logo_area} $TMP/caption_area.png + size_space=5 + else + height_logo_area=$height_caption + fi + #--------------------- + # Insert the logo at top left of the logo area + #--------------------- + convert $TMP/logo.png -gravity NorthWest -background white -extent ${width_logo_area}x${height_logo_area} $TMP/logo_area.png + #--------------------- + # Merge the caption area and the logo area together + #--------------------- + montage $TMP/caption_area.png $TMP/logo_area.png -tile 2x1 -geometry +0+0 $TMP/footer.png + else + #-------------------------- + # No logo: set the caption area as footer + #-------------------------- + mv $TMP/caption_area.png $TMP/footer.png + fi + #-------------------------- + # Optional: remove the bottom part of the figure + #-------------------------- + if ( $cut_bottom ); then + convert $TMP/figure.png -crop +0-${cut_bottom_pixels} +repage $TMP/figure.png + fi + #--------------------- + # Add a white horizontal strip above the footer area + #--------------------- + convert -size x${add_strip_above_caption} xc:white $TMP/space.png + montage $TMP/space.png $TMP/footer.png -tile 1x2 -geometry +0+0 $TMP/footer.png + #--------------------- + # Add a white horizontal strip below the footer area + #--------------------- + convert -size x${add_strip_below_caption} xc:white $TMP/space.png + montage $TMP/footer.png $TMP/space.png -tile 1x2 -geometry +0+0 $TMP/footer.png + #--------------------- + # Add the footer to the figure + #--------------------- + montage $TMP/figure.png $TMP/footer.png -tile 1x2 -geometry +0+0 $TMP/fig_and_footer.png +else + #--------------------- + # No title and no logo + #--------------------- + #----------------------------------------------- + # Optional: remove the bottom part of the figure + #----------------------------------------------- + if ( $cut_bottom ); then + convert $TMP/figure.png -crop +0-${cut_bottom_pixels} +repage $TMP/figure.png + fi + #--------------------------------------------------------- + # set figure as figure_and_footer + #--------------------------------------------------------- + mv $TMP/figure.png $TMP/fig_and_footer.png +fi +#------------------------------------------ +# Cut the upper part of the image to remove old title +#------------------------------------------ +if ( $cut_title ); then + convert $TMP/fig_and_footer.png -crop +0+${cut_title_pixels} +repage $TMP/fig_and_footer.png +fi +#------------------------------------------ +# Add title if needed +#------------------------------------------ +if [[ -v TITLE ]] +then + if [[ $add_strip_below_title -gt 0 ]]; then + convert -size x${add_strip_below_title} xc:white $TMP/white_strip.png + montage $TMP/white_strip.png $TMP/fig_and_footer.png -tile 1x2 -geometry +0+0 $TMP/fig_and_footer_and_strip.png + else + mv $TMP/fig_and_footer.png $TMP/fig_and_footer_and_strip.png + fi + convert $TMP/fig_and_footer_and_strip.png -background white -gravity Center -pointsize $titsize_rescaled -font Arial -define pango:center=true pango:"$TITLE" +swap -append $TMP/fig_footer_and_header.png +else + mv $TMP/fig_and_footer.png $TMP/fig_footer_and_header.png +fi +#------------------------------------------ +# Add space above the title +#------------------------------------------ +convert -size 
x${add_strip_above_title} xc:white $TMP/space.png
+montage $TMP/space.png $TMP/fig_footer_and_header.png -tile 1x2 -geometry +0+0 $TMP/fig_footer_and_header.png
+#------------------------------------------
+# Move final result to OUTFILE
+#------------------------------------------
+mv $TMP/fig_footer_and_header.png $OUTFILE
+ echo "Invalid option: -$OPTARG" >&2 + exit 1 + ;; + :) + echo "Option -$OPTARG requires an argument." >&2 + exit 1 + ;; + esac +done +shift "$((OPTIND-1))" # Shift off the options +#------------------- +# Check we have two args remaining +#------------------- +if [[ $# -ne 2 ]] ; then + show_help + exit 0 +fi +#------------------- +# Get in and out files +#------------------- +INFILE=$1 +OUTFILE=$2 +#------------------- +# Check infile exists +#------------------- +if [[ ! -f $INFILE ]] ; then + echo "File not found: $INFILE" >&2 + exit -1 +fi +#################### +# Start doing some work +#################### +#------------------------------ +# Create a tmp folder to store intermediate files. +# It will be cleared by system. +#------------------------------ +TMP="$(mktemp -d)" +echo "Working in "$TMP" folder." +#------------------------------ +# Convert ps to png with 300dpi +#------------------------------ +if [[ $INFILE == *.ps ]] ; then + convert -units PixelsPerInch -density 300 -background white -flatten $INFILE $TMP/figure.png +else + cp $INFILE $TMP/figure.png +fi +#------------------------------ +# Get the width and height of the image and logo, in pixels. +#------------------------------ +width_figure=$(identify -ping -format %w $TMP/figure.png) +height_figure=$(identify -ping -format %h $TMP/figure.png) +width_logo_file=$(identify -ping -format %w $LOGO_file) +#------------------------------ +# Set the width of logo and logo area. +#------------------------------ +width_logo_area=$(( $width_figure * $resizelogo / 100 )) +width_logo=$(( $width_logo_area * 90 / 100 )) +resize_logo=$(( $width_logo * 100 / $width_logo_file )) +#------------------------------ +# Set the width of caption and caption area. +#------------------------------ +if ( $LOGO ) ; then + width_caption_area=$(( $width_figure - $width_logo_area )) +else + width_caption_area=$width_figure +fi +width_caption=$(( $width_caption_area * 90 / 100)) + +#------------------------------ +# Set rescaled sizes for fonts and other tricks +#------------------------------ +resizefont=$( echo "scale=3; $width_figure*$resizefont/125" | bc ) + +if [[ $width_figure -gt 1000 ]]; then + captionsize=$(( $captionsize - 2 )) + if [[ $width_figure -gt 1500 ]]; then + captionsize=$(( $captionsize - 3 )) + if [[ $width_figure -gt 2000 ]]; then + captionsize=$(( $captionsize - 2 )) + if [[ $width_figure -gt 3000 ]]; then + captionsize=$(( $captionsize - 3 )) + fi + fi + fi +fi + +captionsize_rescaled=$( echo "$captionsize*$resizefont/10" | bc ) +titsize_rescaled=$( echo "scale=3; $captionsize_rescaled*150/100" | bc ) # size of the title. It is automatically rescaled if the size of the figure increases/decreases; i.e: if the figure double, the title size doubles too. + +#------------------ +# We need to create a footer? +# (either caption or logo or both) +#------------------ +if [[ -v CAPTION ]] || ( $LOGO ) ; then + #--------------------- + # Create the caption and insert it at the top center of the caption area. + # NOTE: if caption is unset it writes an empty string anyway. 
+ #--------------------- + convert -background white -font Arial -pointsize $captionsize_rescaled -size ${width_caption}x -define pango:justify=true pango:"$CAPTION" -geometry +0+0 $TMP/caption.png + convert $TMP/caption.png -gravity North -background white -extent ${width_caption_area}x $TMP/caption_area.png + height_caption=$(identify -ping -format %h $TMP/caption.png) + height_caption_area=$(identify -ping -format %h $TMP/caption_area.png) + #-------------------------- + # Draw the logo if needed + #-------------------------- + if ( $LOGO ) ; then + #--------------------- + # Resize the logo, to be proportional to the size of the figure + #--------------------- + convert -background white $LOGO_file -scale ${resize_logo}% $TMP/logo.png + height_logo_resized=$(identify -ping -format %h $TMP/logo.png) + #--------------------- + # Set height of logo area. + # If the caption is less high than the logo, insert the caption at the center of + # the caption area rather than at the top center (This overwrites previous image). + # In this case also set fewer space below footer + #--------------------- + if [[ $height_caption_area -lt $height_logo_resized ]]; then + height_logo_area=$height_logo_resized + convert $TMP/caption.png -gravity Center -background white -extent ${width_caption_area}x${height_logo_area} $TMP/caption_area.png + size_space=5 + else + height_logo_area=$height_caption + fi + #--------------------- + # Insert the logo at top left of the logo area + #--------------------- + convert $TMP/logo.png -gravity NorthWest -background white -extent ${width_logo_area}x${height_logo_area} $TMP/logo_area.png + #--------------------- + # Merge the caption area and the logo area together + #--------------------- + montage $TMP/caption_area.png $TMP/logo_area.png -tile 2x1 -geometry +0+0 $TMP/footer.png + else + #-------------------------- + # No logo: set the caption area as footer + #-------------------------- + mv $TMP/caption_area.png $TMP/footer.png + fi + #-------------------------- + # Optional: remove the bottom part of the figure + #-------------------------- + if ( $cut_bottom ); then + convert $TMP/figure.png -crop +0-${cut_bottom_pixels} +repage $TMP/figure.png + fi + #--------------------- + # Add a white horizontal strip above the footer area + #--------------------- + convert -size x${add_strip_above_caption} xc:white $TMP/space.png + montage $TMP/space.png $TMP/footer.png -tile 1x2 -geometry +0+0 $TMP/footer.png + #--------------------- + # Add a white horizontal strip below the footer area + #--------------------- + convert -size x${add_strip_below_caption} xc:white $TMP/space.png + montage $TMP/footer.png $TMP/space.png -tile 1x2 -geometry +0+0 $TMP/footer.png + #--------------------- + # Add the footer to the figure + #--------------------- + montage $TMP/figure.png $TMP/footer.png -tile 1x2 -geometry +0+0 $TMP/fig_and_footer.png +else + #--------------------- + # No title and no logo + #--------------------- + #----------------------------------------------- + # Optional: remove the bottom part of the figure + #----------------------------------------------- + if ( $cut_bottom ); then + convert $TMP/figure.png -crop +0-${cut_bottom_pixels} +repage $TMP/figure.png + fi + #--------------------------------------------------------- + # set figure as figure_and_footer + #--------------------------------------------------------- + mv $TMP/figure.png $TMP/fig_and_footer.png +fi +#------------------------------------------ +# Cut the upper part of the image to remove old title 
+#------------------------------------------
+# Cut the upper part of the image to remove the old title
+#------------------------------------------
+if ( $cut_title ); then
+  convert $TMP/fig_and_footer.png -crop +0+${cut_title_pixels} +repage $TMP/fig_and_footer.png
+fi
+#------------------------------------------
+# Add the title if needed
+#------------------------------------------
+if [[ -v TITLE ]]
+then
+  if [[ $add_strip_below_title -gt 0 ]]; then
+    convert -size x${add_strip_below_title} xc:white $TMP/white_strip.png
+    montage $TMP/white_strip.png $TMP/fig_and_footer.png -tile 1x2 -geometry +0+0 $TMP/fig_and_footer_and_strip.png
+  else
+    mv $TMP/fig_and_footer.png $TMP/fig_and_footer_and_strip.png
+  fi
+  convert $TMP/fig_and_footer_and_strip.png -background white -gravity Center -pointsize $titsize_rescaled -font Arial -define pango:center=true pango:"$TITLE" +swap -append $TMP/fig_footer_and_header.png
+else
+  mv $TMP/fig_and_footer.png $TMP/fig_footer_and_header.png
+fi
+#------------------------------------------
+# Add space above the title
+#------------------------------------------
+convert -size x${add_strip_above_title} xc:white $TMP/space.png
+montage $TMP/space.png $TMP/fig_footer_and_header.png -tile 1x2 -geometry +0+0 $TMP/fig_footer_and_header.png
+#------------------------------------------
+# Move the final result to OUTFILE
+#------------------------------------------
+mv $TMP/fig_footer_and_header.png $OUTFILE
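+
+#------------------------------------------
+# Usage sketch (script name and option letters are illustrative only;
+# they must match the getopts block at the top of this script):
+#
+#   ./add_footer.sh -t "My title" -c "My caption" figure.ps figure.png
+#------------------------------------------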
diff --git a/old/backup/grid2contour.R b/old/backup/grid2contour.R
new file mode 100644
index 0000000000000000000000000000000000000000..8870c2b91f2a418faec2018786f1d93b054a78ce
--- /dev/null
+++ b/old/backup/grid2contour.R
@@ -0,0 +1,48 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: remove the grid points above a certain value (argument 'level') [and below '-level' if two.sides=TRUE] that happen to lie in areas with few points above that value.
+# Useful to remove from a contour plot all the small spots of significant points that we don't want to contour.
+# To do so, just apply this function inside the option 'contours' of 'PlotEquiMap' to remove the significant points (they are set to 0).
+# Argument 'size' determines the side of the square (in grid points) used to check whether there are enough grid points with values above 'level' near
+# the chosen point. Increasing it increases the number of grid points deleted, leaving only the bigger spots of points above the chosen value.
+# I/O: a 2D lat/lon grid in geographic coordinates
+# Assumptions: none
+# Branch: general
+#
+# Example:
+# data <- matrix(runif(48000,0,1)^2,300,160) + matrix(c(rep(0,20000),rep(0.6,3000),rep(0,25000)),300,160)
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"))
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=data, brks2=0.6)
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE)
+# PlotEquiMap_colored(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE, contours.col="blue", continents.col="gray40")
+
+grid2contour <- function(grid, level, two.sides=FALSE, size=10){
+  nrows <- dim(grid)[1]
+  ncols <- dim(grid)[2]
+  radius <- round(size/2)
+
+  grid.weighted <- matrix(NA, nrows, ncols)
+
+  # expand the grid, replicating it in longitude (wrap-around) and adding latitude-flipped, half-domain-shifted copies above and below, so the moving window can cross the borders of the domain:
+  grid.expanded <- rbind(cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]),cbind(grid,grid,grid),cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]))
+
+  # count, for each grid point, how many points inside its square window exceed 'level' (and also fall below '-level', if two.sides=TRUE):
+  if(two.sides==FALSE){
+    for(i in 1:nrows){
+      for(j in 1:ncols){
+        grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level)
+      }
+    }
+  } else {
+    for(i in 1:nrows){
+      for(j in 1:ncols){
+        grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > level) + sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] < -level)
+      }
+    }
+  }
+
+  n.points.min <- (2*radius+1)^2*0.3 # 30% of the total points in the square
+  ss <- which(grid.weighted < n.points.min)
+  grid[ss] <- 0
+  return(grid)
+}
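+
+# Minimal toy check of the filter (hypothetical values, commented out):
+# g <- matrix(0, 10, 10)
+# g[1:2, 1:2]   <- 1                    # small spot: should be set to 0
+# g[5:10, 5:10] <- 1                    # large spot: should survive
+# filtered <- grid2contour(g, level=0.5, size=4)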
diff --git a/old/backup/lmFitFast.R b/old/backup/lmFitFast.R
new file mode 100644
index 0000000000000000000000000000000000000000..bfa359c6946181c5f90db6072b80287efafb5cb0
--- /dev/null
+++ b/old/backup/lmFitFast.R
@@ -0,0 +1,65 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: modified version of the lm.fit() base R function, to increase its speed by removing some unnecessary outputs (such as the QR decomposition)
+# Assumptions: the first column of the x matrix must be a column of 1s, to represent the constant term.
+# I/O: none, it's a function, not a script :)
+# Branch: general
+
+lm.fit.fast <- function (x, y, offset = NULL, method = "qr", tol = 1e-07, singular.ok = TRUE, ...)
+{
+    if (is.null(n <- nrow(x)))
+        stop("'x' must be a matrix")
+    if (n == 0L)
+        stop("0 (non-NA) cases")
+    p <- ncol(x)
+    if (p == 0L) {
+        return(list(coefficients = numeric(0L), residuals = y,
+            fitted.values = 0 * y, rank = 0, df.residual = length(y)))
+    }
+    ny <- NCOL(y)
+    if (is.matrix(y) && ny == 1)
+        y <- drop(y)
+    if (!is.null(offset))
+        y <- y - offset
+    if (NROW(y) != n)
+        stop("incompatible dimensions")
+    if (method != "qr")
+        warning(gettextf("method = '%s' is not supported. Using 'qr'",
+            method), domain = NA)
+    if (length(list(...)))
+        warning("extra arguments ", paste(names(list(...)), sep = ", "),
+            " are just disregarded.")
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+
+    # the core of the fit: call the internal C least-squares routine directly
+    z <- .Call(stats:::C_Cdqrls, x, y, tol, TRUE)
+
+    if (!singular.ok && z$rank < p)
+        stop("singular fit encountered")
+    coef <- z$coefficients
+    pivot <- z$pivot
+    r1 <- seq_len(z$rank)
+    dn <- colnames(x)
+    if (is.null(dn))
+        dn <- paste("x", 1L:p, sep = "")
+    r2 <- if (z$rank < p)
+        (z$rank + 1L):p
+    else integer(0L)
+    if (is.matrix(y)) {
+        coef[r2, ] <- NA
+        coef[pivot, ] <- coef
+        dimnames(coef) <- list(dn, colnames(y))
+    }
+    else {
+        coef[r2] <- NA
+        coef[pivot] <- coef
+        names(coef) <- dn
+    }
+    z$coefficients <- coef
+    r1 <- y - z$residuals   # fitted values
+    if (!is.null(offset))
+        r1 <- r1 + offset
+    c(z[c("coefficients", "residuals", "rank")], list(fitted.values = r1, df.residual = n - z$rank))
+}
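+
+# Quick sanity check (a sketch with random data; not part of the original script):
+# x <- cbind(1, matrix(rnorm(200), 100, 2))   # leading column of 1s, as required
+# y <- rnorm(100)
+# all.equal(lm.fit.fast(x, y)$coefficients, lm.fit(x, y)$coefficients)  # should be TRUE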
diff --git a/old/backup/lsf/diagnostics.R b/old/backup/lsf/diagnostics.R
new file mode 100644
index 0000000000000000000000000000000000000000..29623ee327d478777420c47b42343aad094fec0b
--- /dev/null
+++ b/old/backup/lsf/diagnostics.R
@@ -0,0 +1,53 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+
+library(s2dverification)
+outdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+
+gpfs.path <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc"
+moore.path <- "/scratch/Earth/ncortesi/prlr_200705.nc"
+chunk <- as.integer(commandArgs(TRUE)[1])
+#chunk=1
+
+# netcdf-3 (10 MB):
+#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1]
+#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing!
+#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+
+### netcdf-3 (1GB):
+# [or netcdf-4, if the name in prlr_200706.nc is changed]
+lat=seq(-90,90,0.75)
+# load 3 chunks (the minimum number possible) from gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+# load all chunks in gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=gpfs.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks in moore:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=moore.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+
+# load 3 chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1))
+# load all chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1))
+
+# netcdf-3 (10 GB):
+#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+#lat=seq(-90,90,0.75)
+
+#.RData (1 GB): (add ~20% to the loading time because the file is 820 MB)
+#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData"))
+# in esnas:
+#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData"))
+
+# save chunks:
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData'))
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData'))
+
+#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")
+#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData'))
+#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt'))
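+
+# Run a single chunk from the shell (usage sketch):
+#   Rscript diagnostics.R 1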
diff --git a/old/backup/lsf/diagnostics_v2.R b/old/backup/lsf/diagnostics_v2.R
new file mode 100644
index 0000000000000000000000000000000000000000..0c1cdce7d680be584a013e143e83f7ab58689641
--- /dev/null
+++ b/old/backup/lsf/diagnostics_v2.R
@@ -0,0 +1,57 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+
+library(s2dverification)
+outdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+
+gpfs.path <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc"
+moore.path <- "/scratch/Earth/ncortesi/prlr_$YEAR$$MONTH$.nc"
+esnas.path <- "/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc"
+chunk <- as.integer(commandArgs(TRUE)[1])
+#chunk=1
+
+# netcdf-3 (10 MB):
+#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1]
+#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing!
+#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+
+### netcdf-3 (1GB):
+# [or netcdf-4, if the name in prlr_200706.nc is changed]
+lat=seq(-90,90,0.75)
+# load 3 chunks (the minimum number possible) from gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+# load all chunks in gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=gpfs.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from moore's scratch to moore:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=moore.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from esnas to moore:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=esnas.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+
+# load 3 chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1))
+# load all chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1))
+
+# netcdf-3 (10 GB):
+#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+#lat=seq(-90,90,0.75)
+
+#.RData (1 GB): (add ~20% to the loading time because the file is 820 MB)
+#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData"))
+# in esnas:
+#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData"))
+
+# save chunks:
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData'))
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData'))
+
+#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")
+#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData'))
+#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt'))
"/scratch/Earth/ncortesi/prlr_$YEAR$$MONTH$.nc" +chunk <- as.integer(commandArgs(TRUE)[1]) +#chunk=1 + +# netcdf-3 (10 MB): +#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1] +#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing! +#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) + +### netcdf-3 (1GB): +# [or netcdf-4 if change name in prlr_200706.nc] +lat=seq(-90,90,0.75) +# load 3 chunks (the minimum number possible) from gpfs:: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) +# load all chunks in gpfs: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=gpfs.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1)) +# load all chunks in moore: +time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=moore.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1)) + +# load 3 chunks from esnas: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1)) +# load all chunks from esnas: +#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1)) + +# netcdf-3 (10 GB): +#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) +#lat=seq(-90,90,0.75) + +#.RData (1 GB): (must add +20% loading time because it is a file of 820 MB) +#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData")) +# in esnas: +#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData")) + +# save chunks: +#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData')) +#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData')) + +#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h") +#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData')) +#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt')) + + + + + diff --git a/old/backup/lsf/example_test_v9.txt b/old/backup/lsf/example_test_v9.txt new file mode 100644 index 0000000000000000000000000000000000000000..bfacaf27ef941c83b1708380d7d89072843ff6c3 --- /dev/null +++ b/old/backup/lsf/example_test_v9.txt @@ -0,0 +1,17 @@ +Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 1 +Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 2 +Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 3 +Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 4 +Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 5 +Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 6 +Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 7 +Rscript 
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 8
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 9
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 10
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 11
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 12
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 13
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 14
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 15
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 16
+Rscript /gpfs/projects/bsc32/bsc32842/test_v9.R 17
diff --git a/old/backup/lsf/load_netcdf.R b/old/backup/lsf/load_netcdf.R
new file mode 100644
index 0000000000000000000000000000000000000000..cc10194ceb701b6a8e9a84b84fdfbcd173134ff2
--- /dev/null
+++ b/old/backup/lsf/load_netcdf.R
@@ -0,0 +1,6 @@
+library(s2dverification)
+
+# Load a 1 GB NetCDF-3 file from esnas:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc")), obs=NULL,sdates='20070501', nleadtime=1, output='lonlat', nprocs=1))
+
+write.table(time, file=paste0("load_netcdf_total_time_",round(time[3],2),"_seconds.txt")) # time[3] is the elapsed time
diff --git a/old/backup/lsf/load_netcdf.job b/old/backup/lsf/load_netcdf.job
new file mode 100644
index 0000000000000000000000000000000000000000..ccf24c9995977028bf8eb4843f3ecd0748d23b0d
--- /dev/null
+++ b/old/backup/lsf/load_netcdf.job
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+#BSUB -J parallel
+#BSUB -oo parallel-%J.out
+#BSUB -eo parallel-%J.err
+
+#############################################################
+# The line below assigns the job to the SMP queue, so the   #
+# job will run on the SMP machine:                          #
+#############################################################
+
+#BSUB -q smp
+
+#############################################################
+# Set the total computation time of the parallel job.       #
+# The maximum is 48 hours (syntax: HH:MM), but a lower      #
+# value means a faster queue! So, try not to set it much    #
+# higher than the running time of one job only:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# used by your jobs during their sequential execution:      #
+#############################################################
+
+#BSUB -M 10000
+
+#############################################################
+# Allocate a number of processors (cores) for this job.     #
+# It is also the maximum number of jobs running at the      #
+# same time; the jobs exceeding this number will start      #
+# only when the previous jobs have finished computing.      #
+# The maximum number of cores that a user can allocate      #
+# on the SMP machine is 80. However, this number cannot     #
+# be higher than the total RAM of SMP (2000 GB) divided     #
+# by the RAM allocated to one core (rounded down).          #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# for your job.                                             #
+#############################################################
+
+#BSUB -n 1
+
+nCores=1 # same as the number of cores above
+
+#############################################################
+# Include the command below only if you need to write data  #
+# in /esnas. If not, comment it out (for reading from       #
+# /esnas this command is not necessary):                    #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are loaded here in case one of them wasn't:          #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="load_netcdf.R"
+
+#/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+Rscript $diagnostic
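+
+#############################################################
+# Submit this job file to LSF from the shell (sketch):      #
+#                                                           #
+#   bsub < load_netcdf.job                                  #
+#############################################################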
diff --git a/old/backup/lsf/old/diagnostics_MN.job b/old/backup/lsf/old/diagnostics_MN.job
new file mode 100644
index 0000000000000000000000000000000000000000..d7862dd2f68df194254960457916ad9315bb6299
--- /dev/null
+++ b/old/backup/lsf/old/diagnostics_MN.job
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#BSUB -J split_job
+#BSUB -oo diagnostics-%J.out
+#BSUB -eo diagnostics-%J.err
+
+##################################################
+# Name of your script to split in chunks:
+##################################################
+
+script="./script_MN.R"
+
+##################################################
+# Number of chunks (tasks) in which the job is split,
+# i.e: if you have to run the same script 12 times changing
+# the variable "month" from 1 to 12, set nChunks=12.
+# If you are computing a Skill Score using hindcast data
+# with 241 latitude values, set nChunks=241
+##################################################
+
+nChunks=2
+
+##################################################
+# Maximum number of tasks able to run at the same time
+# (same as the number of cores to reserve), plus 1 task used
+# for data communication only.
+# It should be equal to or lower than nChunks, i.e:
+# if nChunks=241, you can set it to 242, or to 122 / 82 / 62 / 49
+# if you want to reserve fewer cores and wait for the tasks to finish
+# before starting the following "battery" of tasks. In this case, to end
+# sooner it is better to leave only a few cores unassigned.
+##################################################
+
+#BSUB -n 3
+
+##################################################
+# Maximum execution time of the job (wall clock limit in hh:mm):
+##################################################
+
+#BSUB -W 01:00
+
+taskList=./diagnostics_MN.txt
+
+echo "" > $taskList
+
+for cnk in $(seq 1 $nChunks); do
+  echo "Rscript" $script $cnk >> $taskList
+done
+
+# export GREASY_LOGFILE=diagnostics.log
+# export GREASY_NWORKERS=4
+# export GREASY_NODELIST=node1,node2,node3
+
+##################################################
+# Run the job splitting it in many sequential tasks in parallel:
+##################################################
+
+/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
diff --git a/old/backup/lsf/old/diagnostics_cluster.job b/old/backup/lsf/old/diagnostics_cluster.job
new file mode 100644
index 0000000000000000000000000000000000000000..37e9b61953097c6e4bde995dc3de82b3bbeca2f3
--- /dev/null
+++ b/old/backup/lsf/old/diagnostics_cluster.job
@@ -0,0 +1,31 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -J diagnostic
+#SBATCH -o diagnostic.out
+#SBATCH -e diagnostic.err
+
+# set the maximum execution time of the diagnostic:
+#SBATCH -t 12:00:00
+
+# Set the name of your script to run:
+diagnostic="script_cluster.R"
+
+# Set the total number of chunks to employ:
+nChunks=2
+
+# Set the maximum number of processes to run in parallel:
+nCores=4
+
+# run a process (thread) for each chunk in the background:
+for ARG in $(seq 1 $nChunks); do
+  Rscript $diagnostic $ARG &
+  nThreads=$(($nThreads+1))
+  if [ "$nThreads" -ge $nCores ]; then
+    wait # wait until the first $nCores chunks have finished before executing the next $nCores
+    nThreads=0
+  fi
+done
+
+# wait until all chunks have been computed, then collect the results of each chunk and merge them into the file 'diagnostic_output.RData':
+wait
+Rscript -e 'nChunks <- as.integer(commandArgs(TRUE)[1])' -e 'for(cnk in 1:nChunks){' -e 'load(paste0(getwd(),"/output_",cnk,".RData"))' -e 'if(cnk==1) output <- array(NA, c(nChunks, length(var)))' -e 'output[cnk,] <- var' -e 'file.remove(paste0(getwd(),"/output_",cnk,".RData"))}' -e 'save(var, file="diagnostic_output.RData")' $nChunks
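+
+# The merge one-liner above, expanded for readability (equivalent R sketch;
+# note it saves 'var', i.e. the last chunk loaded -- saving 'output' may be what is intended):
+#   nChunks <- as.integer(commandArgs(TRUE)[1])
+#   for (cnk in 1:nChunks) {
+#     load(paste0(getwd(), "/output_", cnk, ".RData"))            # loads 'var'
+#     if (cnk == 1) output <- array(NA, c(nChunks, length(var)))
+#     output[cnk, ] <- var
+#     file.remove(paste0(getwd(), "/output_", cnk, ".RData"))
+#   }
+#   save(var, file = "diagnostic_output.RData")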
read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1] +lat=rev(lat) # to go in increasing order of latitudes instead of decreasing! +time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) + + +#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1)) +#lat=seq(-90,90,0.75) + +save(time, file=paste0(workdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData')) diff --git a/old/backup/lsf/old/script_cluster.R b/old/backup/lsf/old/script_cluster.R new file mode 100644 index 0000000000000000000000000000000000000000..176fb8eb2c9602b4feac26dca55f5620269dc80c --- /dev/null +++ b/old/backup/lsf/old/script_cluster.R @@ -0,0 +1,19 @@ +library(s2dverification) + +chunk <- as.integer(commandArgs(TRUE)[1]) + +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + +domain <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +data <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 3, output = 'lonlat', latmin = domain$lat[chunk+1], latmax = domain$lat[chunk], nprocs=1)$obs +data <- data[,,,,1,] + +n.lat <- 1 +my.RMS <- array(NA, c(n.lat, n.lon)) + +# it is mandatory to save the output variable with the name 'var' and inside the file 'output_" + chunk number + ".RData": +var <- RMS(data,data+1) +save(var, file=paste0("/scratch/Earth/ncortesi/output_",chunk,".RData")) diff --git a/old/backup/lsf/parallel_MN.job b/old/backup/lsf/parallel_MN.job new file mode 100644 index 0000000000000000000000000000000000000000..106baf996af8854a950381f9a02caa550b2c0147 --- /dev/null +++ b/old/backup/lsf/parallel_MN.job @@ -0,0 +1,90 @@ +#!/bin/bash + +#BSUB -J diagnostic +#BSUB -oo greasy-%J.out +#BSUB -eo greasy-%J.err + +################################################## +# these modules should already been loaded in your session, +# they are included here in case someone didn't load correctly: +################################################## + +module load R/3.2.2 gcc/4.7.2 NETCDF/4.1.3 intel/13.0.1 openmpi/1.8.1 HDF5/1.8.10 UDUNITS/2.1.24 CDO/1.7.0 NCO + +################################################## +# inlcude the command below only if you need to write in /esnas. +# if not comment it (for reading from /esnas it is not necessary): +################################################## + +# newgrp Earth + +################################################## +# include this line if you want to run the job in the SMP machine, +# or comment it with ### if you want to run the job in MareNostrum: +################################################## + +#BSUB -q smp + +################################################## +# Name of your script to split in chunks: +################################################## + +script="./diagnostics.R" + +################################################## +# first and last chunks (tasks) in which the job is split. 
+
+##################################################
+# Number of cores to reserve. It should be equal to the number
+# of chunks (lastChunk - firstChunk + 1), plus 1 core, which is
+# used only for data communication between tasks.
+# For example, if there is a total of 241 chunks, set it to 242.
+# You can also set it to a value lower than the number of chunks
+# (for example if the number of chunks is higher than the number of cores);
+# in this case, the chunks that are not executed immediately are
+# put in an internal queue and executed later, when cores become
+# available. Consequently, running times will increase.
+##################################################
+
+#BSUB -n 2
+
+##################################################
+# Set the maximum execution time of the job (in hh:mm).
+# The upper limit is 48 hours, but a lower value means a faster queue,
+# so try to set it only a bit higher than the running time of one chunk.
+##################################################
+
+#BSUB -W 10:00
+
+##################################################
+# Create a .txt file with the list of tasks to run:
+##################################################
+
+taskList=./diagnostics.txt
+
+echo "" > $taskList # new empty file; if it already exists, it is overwritten
+
+for cnk in $(seq $firstChunk $lastChunk); do
+  echo "Rscript" $script $cnk >> $taskList
+done
+
+#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))"
+
+##################################################
+# Run the job splitting it in many sequential tasks in parallel:
+##################################################
+
+/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
diff --git a/old/backup/lsf/parallel_SMP.R b/old/backup/lsf/parallel_SMP.R
new file mode 100644
index 0000000000000000000000000000000000000000..ac8417c9174b8bb6d5bff3f083cb711e1df05b80
--- /dev/null
+++ b/old/backup/lsf/parallel_SMP.R
@@ -0,0 +1,56 @@
+#########################################################################################
+#                            Sub-seasonal Skill Scores                                  #
+#########################################################################################
+
+library(s2dverification)
+outdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+
+gpfs.path <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc"
+moore.path <- "/scratch/Earth/ncortesi/prlr_$YEAR$$MONTH$.nc"
+esnas.path <- "/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc"
+chunk <- as.integer(commandArgs(TRUE)[1])
+#chunk=1
+
+# netcdf-3 (10 MB):
+#lat <- read.table("/gpfs/projects/bsc32/bsc32842/RESILIENCE/latitudes.txt")[,1]
+#lat=rev(lat) # to go in increasing order of latitudes instead of decreasing!
+#time <- system.time(data.hindcast <- Load(var='sfcWind', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/sfcWind_2013010200.nc")), obs=NULL,sdates='20130102', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+
+### netcdf-3 (1GB):
+# [or netcdf-4, if the name in prlr_200706.nc is changed]
+lat=seq(-90,90,0.75)
+# load 3 chunks (the minimum number possible) from gpfs:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+# load all chunks in gpfs:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=gpfs.path)), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from moore's scratch to moore:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=moore.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+# load all chunks from esnas to moore:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path=esnas.path)), obs=NULL, sdates='20070501', nleadtime=157, output='lonlat',nprocs=1))
+
+# load 3 chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200706.nc")), obs=NULL,sdates='20070601', nleadtime=157, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2], nprocs=1))
+# load all chunks from esnas:
+#time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_200705.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1))
+
+# netcdf-3 (10 GB):
+#time <- system.time(data.hindcast <- Load(var='psl', exp = list(list(path="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/psl_19900501.nc")), obs=NULL,sdates='19900501', nleadtime=1, output='lonlat', latmin=lat[chunk], latmax=lat[chunk+2],nprocs=1))
+#lat=seq(-90,90,0.75)
+
+#.RData (1 GB): (add ~20% to the loading time because the file is 820 MB)
+#time <- system.time(load(file="/gpfs/projects/bsc32/bsc32842/RESILIENCE/2014010200/test.RData"))
+# in esnas:
+#time <- system.time(load(file="/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/test.RData"))
+
+# save chunks:
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'_',data.hindcast$nleadtime,'.RData'))
+#save(time, file=paste0(outdir,'/test_chunk_',chunk,'_time_',round(time[3],2),'.RData'))
+
+#a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")
+#save(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.RData'))
+#write.table(a, file=paste0(outdir,'/test_chunk_',chunk,'_file_',a[1],'.txt'))
diff --git a/old/backup/lsf/parallel_SMP.job b/old/backup/lsf/parallel_SMP.job
new file mode 100644
index 0000000000000000000000000000000000000000..fe9a72f733af97d86510f6145e6df28e8b73ecf7
--- /dev/null
+++ b/old/backup/lsf/parallel_SMP.job
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+#BSUB -J parallel
+#BSUB -oo parallel-%J.out
+#BSUB -eo parallel-%J.err
+
+#############################################################
+# The line below assigns the job to the SMP queue, so the   #
+# job will run on the SMP machine:                          #
+#############################################################
+
+#BSUB -q smp
+
+#############################################################
+# Set the total computation time of the parallel job.       #
+# The maximum is 48 hours (syntax: HH:MM), but a lower      #
+# value means a faster queue! So, try not to set it much    #
+# higher than the running time of one job only:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# used by your jobs during their sequential execution:      #
+#############################################################
+
+#BSUB -M 65000
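+
+#############################################################
+# Worked example of the RAM/cores constraint described      #
+# below (hypothetical numbers): with -M 65000 (~65 GB per   #
+# core) you can allocate at most 2000 / 65 = 30 cores.      #
+#############################################################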
+
+#############################################################
+# Allocate a number of processors (cores) for this job.     #
+# It is also the maximum number of jobs running at the      #
+# same time; the jobs exceeding this number will start      #
+# only when the previous jobs have finished computing.      #
+# The maximum number of cores that a user can allocate      #
+# on the SMP machine is 80. However, this number cannot     #
+# be higher than the total RAM of SMP (2000 GB) divided     #
+# by the RAM allocated to one core (rounded down).          #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# for your job.                                             #
+#############################################################
+
+#BSUB -n 4
+
+nCores=4 # same as the number of cores above
+
+#############################################################
+# Include the command below only if you need to write data  #
+# in /esnas. If not, comment it out (for reading from       #
+# /esnas this command is not necessary):                    #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are loaded here in case one of them wasn't:          #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="../weather_regimes_v35.R"
+
+#############################################################
+# First and last chunks (tasks) in which the job is split.
+# Examples:
+# - If you are computing a Skill Score using hindcast data
+#   with 241 latitude values, set firstChunk=1 and lastChunk=241
+# - If you want to run the same script 12 times changing variable
+#   "month" from 1 to 12, set firstChunk=1 and lastChunk=12
+# - If you want to run the same script for a sequence of years, i.e:
+#   from 1980 to 2015, set firstChunk=1980 and lastChunk=2015
+#############################################################
+
+firstChunk=1
+lastChunk=1
+# Examples:
+# - If you are computing a Skill Score using hindcast data
+#   with 241 latitude values, set firstChunk=1 and lastChunk=241
+# - If you want to run the same script 12 times changing variable
+#   "month" from 1 to 12, set firstChunk=1 and lastChunk=12
+# - If you want to run the same script for a sequence of years, i.e:
+#   from 1980 to 2015, set firstChunk=1980 and lastChunk=2015
+#############################################################
+
+firstChunk=1
+lastChunk=1
+
+#############################################################
+# Name of the .txt file with the list of tasks to run:      #
+#############################################################
+
+taskList=./parallel.txt
+
+#############################################################
+# create the .txt file with the task list:                  #
+#############################################################
+
+> $taskList # create an empty file; if it already exists, truncate it
+
+for cnk in $(seq $firstChunk $lastChunk); do
+  echo "Rscript" $diagnostic $cnk >> $taskList
+done
+
+#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))"
+
+##############################################################
+# Run the job splitting it in many sequential tasks, one for #
+# each different set of values of the 3 above variables, and #
+# in groups of jobs running at the same time:                #
+##############################################################
+
+#/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+Rscript $diagnostic $firstChunk
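+# For illustration (hedged; with firstChunk=1 and lastChunk=3), the generated
+# parallel.txt would contain one task per line:
+#
+#   Rscript ../weather_regimes_v35.R 1
+#   Rscript ../weather_regimes_v35.R 2
+#   Rscript ../weather_regimes_v35.R 3
+#
+# GREASY (commented above) would dispatch these tasks over the allocated
+# cores; as written, only the first chunk is run directly with Rscript.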
diff --git a/old/backup/lsf/parallel_SMP.job~ b/old/backup/lsf/parallel_SMP.job~
new file mode 100644
index 0000000000000000000000000000000000000000..f9c78773d9c71ef5f826766516785942b45fe80b
--- /dev/null
+++ b/old/backup/lsf/parallel_SMP.job~
@@ -0,0 +1,106 @@
+#!/bin/bash
+
+#BSUB -J parallel
+#BSUB -oo parallel-%J.out
+#BSUB -eo parallel-%J.err
+
+#############################################################
+# The line below assigns the job to the SMP queue, and      #
+# consequently the job will run on the SMP machine:         #
+#############################################################
+
+#BSUB -q smp
+
+#############################################################
+# Set the total computation time of the parallel job.       #
+# Time max is 48 hours (syntax: HH:MM), but a lower value   #
+# means a faster queue! So, try to set it not too much      #
+# higher than the running time of one job only:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# utilized by your jobs during their sequential execution:  #
+#############################################################
+
+#BSUB -M 65000
+
+#############################################################
+# Allocate a number of processors (cores) to this job.      #
+# It is also equal to the maximum number of jobs running at #
+# the same time; the jobs exceeding this number will start  #
+# only when the previous jobs have finished computing.      #
+# The maximum number of cores that can be allocated by a    #
+# user on the SMP machine is 80. However, this number       #
+# cannot be higher than the total RAM of SMP (2000 GB)      #
+# divided by the RAM allocated to one core (rounded down).  #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# to your job.                                              #
+#############################################################
+
+#BSUB -n 4
+
+nCores=4 # same as the number of cores above
+
+#############################################################
+# Include the command below only if you need to write data  #
+# to /esnas. If not, keep it commented (for reading from    #
+# /esnas this command is not necessary):                    #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are provided here in case some of them weren't:      #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="./diagnostics.R"
+
+##################################################
+# first and last chunks (tasks) in which the job is split.
+# Examples:
+# - If you are computing a Skill Score using hindcast data
+#   with 241 latitude values, set firstChunk=1 and lastChunk=241
+# - If you want to run the same script 12 times changing variable
+#   "month" from 1 to 12, set firstChunk=1 and lastChunk=12
+# - If you want to run the same script for a sequence of years, i.e:
+#   from 1980 to 2015, set firstChunk=1980 and lastChunk=2015
+##################################################
+
+firstChunk=1
+lastChunk=1
+
+##################################################
+# Create a .txt file with the list of tasks to run:
+##################################################
+
+taskList=./diagnostics.txt
+
+> $taskList # create an empty file; if it already exists, truncate it
+
+for cnk in $(seq $firstChunk $lastChunk); do
+  echo "Rscript" $diagnostic $cnk >> $taskList
+done
+
+#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))"
+
+##################################################
+# Run the job splitting it in many sequential tasks in parallel:
+##################################################
+
+#/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+Rscript $diagnostic $firstChunk
diff --git a/old/backup/lsf/parallel_SMP_v2.job b/old/backup/lsf/parallel_SMP_v2.job
new file mode 100644
index 0000000000000000000000000000000000000000..4c0a35224309dfdf1dc6cd75cce29fa64e4e907b
--- /dev/null
+++ b/old/backup/lsf/parallel_SMP_v2.job
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+#BSUB -J parallel
+#BSUB -oo parallel-%J.out
+#BSUB -eo parallel-%J.err
+
+#############################################################
+# The line below assigns the job to the SMP queue, and      #
+# consequently the job will run on the SMP machine:         #
+#############################################################
+
+#BSUB -q smp
+
+#############################################################
+# Set the total computation time of the parallel job.       #
+# Time max is 48 hours (syntax: HH:MM), but a lower value   #
+# means a faster queue! So, try to set it not too much      #
+# higher than the running time of one job only:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# utilized by your jobs during their sequential execution:  #
+#############################################################
+
+#BSUB -M 65000
+
+#############################################################
+# Allocate a number of processors (cores) to this job.      #
+# It is also equal to the maximum number of jobs running at #
+# the same time; the jobs exceeding this number will start  #
+# only when the previous jobs have finished computing.      #
+# The maximum number of cores that can be allocated by a    #
+# user on the SMP machine is 80. However, this number       #
+# cannot be higher than the total RAM of SMP (2000 GB)      #
+# divided by the RAM allocated to one core (rounded down).  #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# to your job.                                              #
+#############################################################
+
+#BSUB -n 4
+
+nCores=4 # same as the number of cores above
+
+#############################################################
+# Include the command below only if you need to write data  #
+# to /esnas. If not, keep it commented (for reading from    #
+# /esnas this command is not necessary):                    #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are provided here in case some of them weren't:      #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="../weather_regimes_v35.R"
+
+#############################################################
+# first and last chunks (tasks) in which the job is split.
+# Examples:
+# - If you are computing a Skill Score using hindcast data
+#   with 241 latitude values, set firstChunk=1 and lastChunk=241
+# - If you want to run the same script 12 times changing variable
+#   "month" from 1 to 12, set firstChunk=1 and lastChunk=12
+# - If you want to run the same script for a sequence of years, i.e:
+#   from 1980 to 2015, set firstChunk=1980 and lastChunk=2015
+#############################################################
+
+firstChunk=1
+lastChunk=1
+
+#############################################################
+# Name of the .txt file with the list of tasks to run:      #
+#############################################################
+
+taskList=./parallel.txt
+
+#############################################################
+# create the .txt file with the task list:                  #
+#############################################################
+
+> $taskList # create an empty file; if it already exists, truncate it
+
+for cnk in $(seq $firstChunk $lastChunk); do
+  echo "Rscript" $diagnostic $cnk >> $taskList
+done
+
+#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))"
+
+##############################################################
+# Run the job splitting it in many sequential tasks, one for #
+# each different set of values of the 3 above variables, and #
+# in groups of jobs running at the same time:                #
+##############################################################
+
+/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+#Rscript $diagnostic $firstChunk
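+# Note (hedged): unlike parallel_SMP.job, this v2 variant actually launches
+# GREASY on the task list, so up to nCores=4 tasks run concurrently; the
+# direct Rscript fallback is kept commented out.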
diff --git a/old/backup/lsf/parallel_SMP_v2.job~ b/old/backup/lsf/parallel_SMP_v2.job~
new file mode 100644
index 0000000000000000000000000000000000000000..3261589471db9eca921cb244f8c9374b482aa175
--- /dev/null
+++ b/old/backup/lsf/parallel_SMP_v2.job~
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+#BSUB -J parallel
+#BSUB -oo parallel-%J.out
+#BSUB -eo parallel-%J.err
+
+#############################################################
+# The line below assigns the job to the SMP queue, and      #
+# consequently the job will run on the SMP machine:         #
+#############################################################
+
+#BSUB -q smp
+
+#############################################################
+# Set the total computation time of the parallel job.       #
+# Time max is 48 hours (syntax: HH:MM), but a lower value   #
+# means a faster queue! So, try to set it not too much      #
+# higher than the running time of one job only:             #
+#############################################################
+
+#BSUB -W 10:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# utilized by your jobs during their sequential execution:  #
+#############################################################
+
+#BSUB -M 65000
+
+#############################################################
+# Allocate a number of processors (cores) to this job.      #
+# It is also equal to the maximum number of jobs running at #
+# the same time; the jobs exceeding this number will start  #
+# only when the previous jobs have finished computing.      #
+# The maximum number of cores that can be allocated by a    #
+# user on the SMP machine is 80. However, this number       #
+# cannot be higher than the total RAM of SMP (2000 GB)      #
+# divided by the RAM allocated to one core (rounded down).  #
+# For example, if you allocated 50 GB/core in the previous  #
+# line, you can't allocate more than 2000 / 50 = 40 cores   #
+# to your job.                                              #
+#############################################################
+
+#BSUB -n 4
+
+nCores=4 # same as the number of cores above
+
+#############################################################
+# Include the command below only if you need to write data  #
+# to /esnas. If not, keep it commented (for reading from    #
+# /esnas this command is not necessary):                    #
+#############################################################
+
+### newgrp Earth
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are provided here in case some of them weren't:      #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'bsub' command is executed:                           #
+#############################################################
+
+diagnostic="../weather_regimes_v35.R"
+
+#############################################################
+# first and last chunks (tasks) in which the job is split.
+# Examples:
+# - If you are computing a Skill Score using hindcast data
+#   with 241 latitude values, set firstChunk=1 and lastChunk=241
+# - If you want to run the same script 12 times changing variable
+#   "month" from 1 to 12, set firstChunk=1 and lastChunk=12
+# - If you want to run the same script for a sequence of years, i.e:
+#   from 1980 to 2015, set firstChunk=1980 and lastChunk=2015
+#############################################################
+
+firstChunk=1
+lastChunk=1
+
+#############################################################
+# Name of the .txt file with the list of tasks to run:      #
+#############################################################
+
+taskList=./parallel.txt
+
+#############################################################
+# create the .txt file with the task list:                  #
+#############################################################
+
+> $taskList # create an empty file; if it already exists, truncate it
+
+for cnk in $(seq $firstChunk $lastChunk); do
+  echo "Rscript" $diagnostic $cnk >> $taskList
+done
+
+#Rscript -e 'outdir <- "/gpfs/projects/bsc32/bsc32842"' -e 'a <- list.files("/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h")' -e "write.table(a, file=paste0(outdir,'/test_file_',a[1],'.txt'))"
+
+##############################################################
+# Run the job splitting it in many sequential tasks, one for #
+# each different set of values of the 3 above variables, and #
+# in groups of jobs running at the same time:                #
+##############################################################
+
+/apps/GREASY/2.1.2.1/bin/greasy $taskList
+
+Rscript $diagnostic $firstChunk
diff --git a/old/backup/palettes/rgbhex.csv b/old/backup/palettes/rgbhex.csv
new file mode 100644
index 0000000000000000000000000000000000000000..7fb1ca44b4a715a70e8658e978350659ba79461f
--- /dev/null
+++ b/old/backup/palettes/rgbhex.csv
@@ -0,0 +1,20 @@
+#0C046E
+#1914BE
+#2341F7
+#2F55FB
+#3E64FF
+#528CFF
+#64AAFF
+#82C8FF
+#A0DCFF
+#B4F0FF
+#FFFBAF
+#FFDD9A
+#FFBF87
+#FFA173
+#FF7055
+#FE6346
+#F7403B
+#E92D36
+#C80F1E
+#A50519
diff --git a/old/backup/parallel.job b/old/backup/parallel.job
new file mode 100644
index 0000000000000000000000000000000000000000..2f1e06d6880db42a1892b3dfadb3130dc8d8391b
--- /dev/null
+++ b/old/backup/parallel.job
@@ -0,0 +1,283 @@
+#!/bin/bash
+
+#############################################################
+# You can keep the name of your parallel job as it is below #
+# ('parallel') or change it. In any case, only one standard #
+# error and one standard output file will be created, even  #
+# if you run hundreds of jobs, so as not to fill your       #
+# directory with unnecessary files. All the information you #
+# need on the jobs is inside these two files, whose file    #
+# names have the job number appended as suffix.             #
+#                                                           #
+# To increase productivity, create an alias in .bashrc      #
+# such as the one below to run this script from everywhere: #
+#                                                           #
+# alias p='sbatch ~/scripts/slurm/parallel.job'             #
+#                                                           #
+# Remember to regularly remove any data left in the shared  #
+# memory (/dev/shm/), because the shared memory is limited  #
+# to a few GB (depending on the node), and when it is full, #
+# no users can run jobs. You can create an alias like the   #
+# 'clean' alias below and execute it from time to time. Run #
+# it also whenever a Load() call of s2dverification         #
+# terminates prematurely.                                   #
+#                                                           #
+# clean='find /dev/shm/ -user $(whoami) -exec rm {} \;'     #
+#                                                           #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+#   *NODE*      *CORES*   *RAM (GB)*                        #
+#   moore          8         144                            #
+#   amdahl        12         258                            #
+#   gustafson     20         258                            #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more  #
+# nodes, depending on the available resources. If you want  #
+# to run many sequential jobs at the same time, it is       #
+# better to run them on the nodes with more resources       #
+# available (cores and memory); for example, if a node has  #
+# its RAM almost full, you don't want to risk your jobs     #
+# being cancelled for lack of RAM. Or you may want to save  #
+# your outputs not in the /esnas scratch but in the node's  #
+# /scratch, without having them saved in the /scratch of    #
+# different nodes.                                          #
+#                                                           #
+# In the past, option --mem-per-cpu allocated the desired   #
+# quantity of RAM for each processor (core). It was disabled#
+# to allow more users to work at the same time, decreasing  #
+# queue times considerably.                                 #
+#                                                           #
+# On the contrary, the -m option of sinteractive still      #
+# works, so you can take advantage of it to run an          #
+# interactive session in amdahl or gustafson, by specifying #
+# a higher RAM amount than moore:                           #
+# ~> sinteractive -m 200000                                 #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D  #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w amdahl
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of all the parallel jobs (syntax: DD-HH:MM:SS).      #
+# You can leave this value as set below (1 week). If your   #
+# job takes more than 1 week to finish, even after          #
+# splitting it into many jobs that run simultaneously, then #
+# it is better not to run it on our cluster but on the SMP  #
+# machine or on MareNostrum instead, because it would take  #
+# up computational resources that other users cannot use    #
+# while your jobs are running.                              #
+#############################################################
+
+#SBATCH -t 01:00:00
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e: #
+# it sets the number of sequential jobs that run at the     #
+# same time on the chosen node. Each time a job finishes, a #
+# new job is run, so there will always be this number of    #
+# jobs running (except at the end, when there are fewer     #
+# jobs left than this number).                              #
+#                                                           #
+# This value also cannot be higher than the node's free RAM #
+# divided by the RAM allocated to one core (rounded down);  #
+# total RAM is 144 / 258 / 258 GB on moore / amdahl /       #
+# gustafson, respectively. For example, if you allocated    #
+# 40 GB/core on moore, you can't allocate more than         #
+# 144 / 40 = 3 cores to your job.                           #
+#                                                           #
+# The Load() function of s2dverification loads data in      #
+# parallel automatically. You have to disable this feature  #
+# by adding the option nprocs=1, or you'll use all the      #
+# cores of the node even if you reserved fewer cores. The   #
+# downside is that the loading time of each job will        #
+# increase. This is the main disadvantage of running many   #
+# sequential jobs at the same time.                         #
+#                                                           #
+# Variable 'nCores' below must always be set to the SAME    #
+# VALUE as the number of cores introduced in the line       #
+# #SBATCH -n (it is redundant but this job file needs it)   #
+#                                                           #
+#############################################################
+
+#SBATCH -n 1
+nCores=1
+
+#############################################################
+# If TRUE, run the job with NO external options,            #
+# and in serial mode.                                       #
+# You also have to set #SBATCH -n 1 manually.               #
+#############################################################
+
+serial=FALSE
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+#script="/home/Earth/ncortesi/scripts/SkillScores_v11.R"
+#script="/home/Earth/ncortesi/scripts/WT_v7.R"
+#script="/home/Earth/ncortesi/scripts/WT_drivers_v8.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_maps_v29.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_v43.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_EDPR.R"
+script="/shared/earth/Operational/EDPR/weather_regimes_EDPR.R"
+
+
+#############################################################
+# Be sure to include all the libraries your script needs:   #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has fewer than three variables, you can disable one or    #
+# two of them by leaving them empty in the lines below.     #
+#                                                           #
+# IMPORTANT: All variables must be integers. The first      #
+# empty variable must be arg3, and the second must be arg2. #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set arg1_min = 1#
+#   and arg1_max = 12, leaving the other two blank:         #
+#     arg1_min = 1                                          #
+#     arg1_max = 12                                         #
+#     arg2_min =                                            #
+#     arg2_max =                                            #
+#     arg3_min =                                            #
+#     arg3_max =                                            #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e: for each month from 1980 to 2015, also   #
+#   set arg2_min = 1980 and arg2_max = 2015                 #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script:          #
+#                                                           #
+#   script.arg <- as.integer(commandArgs(TRUE))             #
+#   if(length(script.arg) > 0){                             #
+#     var1 <- script.arg[1]                                 #
+#     var2 <- script.arg[2]                                 #
+#     var3 <- script.arg[3]                                 #
+#   }                                                       #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+#   import sys                                              #
+#   script_arg = sys.argv                                   #
+#   if len(script_arg) > 1 :                                #
+#     var1 = int(script_arg[1])                             #
+#     var2 = int(script_arg[2])                             #
+#     var3 = int(script_arg[3])                             #
+#                                                           #
+# where var1, var2, var3 are the variables that are         #
+# used in the script and that you'd like to run in parallel #
+# (i.e: 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to set 'nCores' to a    #
+# divisor of the total number of jobs, so the last loop     #
+# will not run a lonely job or only a few jobs.             #
+# For example: if you have to run 20 jobs, run them in      #
+# blocks of 4, 5 or 10 jobs simultaneously.                 #
+#                                                           #
+# If, after the jobs have finished, you need to re-run some #
+# of them, just re-run this job file with arg1_min, arg1_max#
+# and the other argX variables corresponding to the values  #
+# of the jobs you need to re-run. In case the values are not#
+# consecutive (i.e: 2, 5 and 11), then set 'nCores'=1 and   #
+# run those jobs in a sequential way, executing this job    #
+# file one time for each different value you need to        #
+# recompute.                                                #
+#############################################################
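+# Concrete example (hedged): with the values below (arg1 = 2017, arg2 = 2,
+# arg3 disabled), the loop further down issues a single task equivalent to:
+#
+#   Rscript /shared/earth/Operational/EDPR/weather_regimes_EDPR.R 2017 2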
+
+arg1_min=2017
+arg1_max=2017
+
+arg2_min=2
+arg2_max=2
+
+arg3_min=
+arg3_max=
+
+#############################################################
+#            ! DO NOT MODIFY THE LINES BELOW !              #
+#############################################################
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables #
+# in groups of jobs at the same time:                       #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use $! instead to get the pid of the last child process)
+nChildren=0 # set initial number of children processes (background jobs executed by this script)
+
+# if the user left arg2_min or arg3_min empty, fill them with -999 instead,
+# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3:
+size2=${#arg2_min} # length of arg2_min
+size3=${#arg3_min} # length of arg3_min
+if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi
+if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi
+
+if [ $serial == 'FALSE' ]; then
+
+# For loops over the three variables to cycle through all their values:
+for arg1 in $(seq $arg1_min $arg1_max ); do
+  for arg2 in $(seq $arg2_min $arg2_max); do
+    for arg3 in $(seq $arg3_min $arg3_max); do
+      # disable 'arg2' and/or 'arg3' if the user left them empty:
+      if [ $size2 -eq 0 ]; then arg2=""; fi
+      if [ $size3 -eq 0 ]; then arg3=""; fi
+
+      # detect the language of your script and run a new child job in background:
+      if [ ${script: -1} == 'R' ]; then
+        Rscript $script $arg1 $arg2 $arg3 &
+      elif [ ${script: -2} == 'py' ]; then
+        python $script $arg1 $arg2 $arg3 &
+      else
+        $script $arg1 $arg2 $arg3 &
+      fi
+
+      nChildren=$(($nChildren+1)) # increase the number of children processes
+
+      # if there are already $nCores jobs running, wait until at least one of them
+      # has finished before executing the next job:
+      while [ $nChildren -ge $nCores ]
+      do
+        children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+        nChildren=`echo $children | wc -w`         # total number of children + parent
+        let nChildren=nChildren-1                  # total number of children
+        sleep 1 # pause until at least one job has finished
+      done
+    done
+  done
+done
+
+
+else # run the job in a sequential way:
+
+  if [ ${script: -1} == 'R' ]; then
+    Rscript $script
+  elif [ ${script: -2} == 'py' ]; then
+    python $script
+  else
+    $script
+  fi
+fi
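+# Note (hedged sketch, not in the original): on bash >= 4.3 the ps-based
+# counting above could be replaced by the simpler builtin form:
+#
+#   while [ "$(jobs -rp | wc -l)" -ge "$nCores" ]; do wait -n; done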
diff --git a/old/backup/slurm/.RData b/old/backup/slurm/.RData
new file mode 100644
index 0000000000000000000000000000000000000000..ddcfb6e3487c2b4709234d613a4426eecd0a49a1
Binary files /dev/null and b/old/backup/slurm/.RData differ
diff --git a/old/backup/slurm/.directory b/old/backup/slurm/.directory
new file mode 100644
index 0000000000000000000000000000000000000000..5bd515931fe0db1bba299aa6fe17e502bf09119f
--- /dev/null
+++ b/old/backup/slurm/.directory
@@ -0,0 +1,4 @@
+[Dolphin]
+Timestamp=2016,8,11,9,59,47
+Version=3
+ViewMode=2
diff --git a/old/backup/slurm/diagnostic.R b/old/backup/slurm/diagnostic.R
new file mode 100644
index 0000000000000000000000000000000000000000..cb059535cd2f882de6fbcf0fee339b512da4e796
--- /dev/null
+++ b/old/backup/slurm/diagnostic.R
@@ -0,0 +1,77 @@
+##############################################################
+# In your script, before performing the analysis, the range  #
+# of values of its parameter(s) is specified somewhere       #
+# before the main analysis:                                  #
+##############################################################
+
+start_date <- 1:12
+lead_time <- 1:5
+
+...
+
+###############################################################
+# Insert these new lines to link your script with the job     #
+# file. They have to be inserted BEFORE the main loop where   #
+# you repeat the same analysis many times varying the         #
+# parameter(s) defined above (in this example, start_date and #
+# lead_time):                                                 #
+###############################################################
+
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) > 0) {
+  start_date <- script.arg[1]
+  lead_time <- script.arg[2]
+}
+
+###############################################################
+# In case your script is written in Python, you have to       #
+# introduce these lines instead:                              #
+###############################################################
+
+import sys
+script_arg = sys.argv
+
+if len(script_arg) > 1 :
+    start_date = int(script_arg[1])
+    lead_time = int(script_arg[2])
+
+
+###############################################################
+# If your script is in Fortran, C or another language, you    #
+# need to adapt the syntax of the previous lines to the       #
+# language used.                                              #
+###############################################################
+
+###############################################################
+# Here you perform the main analysis (the one which takes the #
+# major part of the computing time) many times, cycling over  #
+# one parameter (or a few parameters) inside this loop until  #
+# the analysis is finished. When you link this script to the  #
+# job file, you'll be able to split this analysis in multiple #
+# ones, one for each different value of the looping           #
+# parameter(s), and to assign each value to a different job,  #
+# running all jobs (or as many as possible) on our cluster.   #
+###############################################################
+
+for(sd in start_date){
+  for(lt in lead_time){
+
+    # your analysis here
+    # ......
+
+
+    ###########################################################
+    # When you save the outputs of the analysis, save them in #
+    # one file for each different value of the looping        #
+    # parameter(s), because if not, each job will overwrite   #
+    # the results of the previous jobs!                       #
+    ###########################################################
+
+    save(output_1, output_2, ..., output_N, file=paste0(work.dir, "/my_analysis_start_date_", sd, "_lead_time_", lt, ".RData"))
+
+  }
+}
+
+
+
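+# Hedged sketch (not part of the template above): once all jobs have finished,
+# the per-parameter files can be merged back in a single session, e.g.:
+#
+# for(sd in 1:12){
+#   for(lt in 1:5){
+#     load(paste0(work.dir, "/my_analysis_start_date_", sd, "_lead_time_", lt, ".RData"))
+#     # ... collect output_1 ... output_N into the full arrays here ...
+#   }
+# }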
diff --git a/old/backup/slurm/diagnostic.R~ b/old/backup/slurm/diagnostic.R~
new file mode 100644
index 0000000000000000000000000000000000000000..a481d525962de3861d4c817b8b39c057900ad8a4
--- /dev/null
+++ b/old/backup/slurm/diagnostic.R~
@@ -0,0 +1,71 @@
+##############################################################
+# In your script, before performing the analysis, the range  #
+# of values of its parameter(s) is specified somewhere       #
+# before the main analysis:                                  #
+##############################################################
+
+start_date <- 1:12
+lead_time <- 1:5
+
+...
+
+###############################################################
+# Insert these new lines to link your script with the job     #
+# file. They have to be inserted BEFORE the main loop where   #
+# you repeat the same analysis many times varying the         #
+# parameter(s) defined above (in this example, start_date and #
+# lead_time):                                                 #
+###############################################################
+
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) > 0) {
+  start_date <- script.arg[1]
+  lead_time <- script.arg[2]
+}
+
+###############################################################
+# In case your script is written in Python, you have to       #
+# introduce these lines instead:                              #
+###############################################################
+
+
+
+###############################################################
+# If your script is in Fortran, C or another language, you    #
+# need to adapt the syntax of the previous lines to the       #
+# language used.                                              #
+###############################################################
+
+###############################################################
+# Here you perform the main analysis (the one which takes the #
+# major part of the computing time) many times, cycling over  #
+# one parameter (or a few parameters) inside this loop until  #
+# the analysis is finished. When you link this script to the  #
+# job file, you'll be able to split this analysis in multiple #
+# ones, one for each different value of the looping           #
+# parameter(s), and to assign each value to a different job,  #
+# running all jobs (or as many as possible) on our cluster.   #
+###############################################################
+
+for(sd in start_date){
+  for(lt in lead_time){
+
+    # your analysis here
+    # ......
+
+
+    ###########################################################
+    # When you save the outputs of the analysis, save them in #
+    # one file for each different value of the looping        #
+    # parameter(s), because if not, each job will overwrite   #
+    # the results of the previous jobs!                       #
+    ###########################################################
+
+    save(output_1, output_2, ..., output_N, file=paste0(work.dir, "/my_analysis_start_date_", sd, "_lead_time_", lt, ".RData"))
+
+  }
+}
+
+
+
diff --git a/old/backup/slurm/diags.com b/old/backup/slurm/diags.com
new file mode 100644
index 0000000000000000000000000000000000000000..7dd629233ba15f474a1612b4822ed7005758c069
--- /dev/null
+++ b/old/backup/slurm/diags.com
@@ -0,0 +1,22 @@
+[DIAGNOSTICS]
+SCRATCH_DIR = /scratch/Earth/$USER
+DATA_DIR = /esnas:/esarchive
+CON_FILES = /esnas/autosubmit/con_files/
+#DIAGS = moc mocarea,30,40,1000,2000,glob
+DIAGS = moc areamoc
+
+FREQUENCY = mon
+
+[EXPERIMENT]
+INSTITUTE = BSC
+MODEL = EC-EARTH3
+MODEL_VERSION = Ec3.2_O1L75
+EXPID = t011
+STARTDATES = 19900101
+MEMBERS = 0
+CHUNK_SIZE = 1
+CHUNKS = 2
+
+[ALIAS]
+OHC = ohc,glob,0,1,10
+AREAMOC = mocarea,30,40,1000,2000,glob
diff --git a/old/backup/slurm/diags.com~ b/old/backup/slurm/diags.com~
new file mode 100644
index 0000000000000000000000000000000000000000..54324378902f5f6cc6b54c4ba1728c2b1dba94bd
--- /dev/null
+++ b/old/backup/slurm/diags.com~
@@ -0,0 +1,21 @@
+[DIAGNOSTICS]
+SCRATCH_DIR = /scratch/Earth/$USER
+DATA_DIR = /esnas:/esarchive
+CON_FILES = /esnas/autosubmit/con_files/
+#DIAGS = moc mocarea,30,40,1000,2000,glob
+DIAGS = moc areamoc
+FREQUENCY = mon
+
+[EXPERIMENT]
+INSTITUTE = BSC
+MODEL = EC-EARTH3
+MODEL_VERSION = Ec3.2_O1L75
+EXPID = t011
+STARTDATES = 19900101
+MEMBERS = 0
+CHUNK_SIZE = 1
+CHUNKS = 2
+
+[ALIAS]
+OHC = ohc,glob,0,1,10
+AREAMOC = mocarea,30,40,1000,2000,glob
diff --git a/old/backup/slurm/old/diagnostic_cluster.R b/old/backup/slurm/old/diagnostic_cluster.R
new file mode 100644
index 0000000000000000000000000000000000000000..176fb8eb2c9602b4feac26dca55f5620269dc80c
--- /dev/null
+++ b/old/backup/slurm/old/diagnostic_cluster.R
@@ -0,0 +1,19 @@
+library(s2dverification)
+
+chunk <- as.integer(commandArgs(TRUE)[1])
+
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+
+domain <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+# (reanalysis latitudes are stored in decreasing order, so lat[chunk+1] < lat[chunk]):
+data <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 3, output = 'lonlat', latmin = domain$lat[chunk+1], latmax = domain$lat[chunk], nprocs=1)$obs
+data <- data[,,,,1,]
+
+n.lat <- 1
+my.RMS <- array(NA, c(n.lat, n.lon))
+
+# it is mandatory to save the output variable with the name 'var', inside a file named 'output_<chunk>.RData':
+var <- RMS(data,data+1)
+save(var, file=paste0("/scratch/Earth/ncortesi/output_",chunk,".RData"))
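+# Hedged sketch (not in the original): the per-chunk files written above can
+# be reassembled into one field afterwards; 'n.chunks' is a placeholder for
+# the number of latitude chunks actually run:
+#
+# RMS.map <- array(NA, c(n.chunks, n.lon))
+# for(chunk in 1:n.chunks){
+#   load(paste0("/scratch/Earth/ncortesi/output_", chunk, ".RData"))  # loads 'var'
+#   RMS.map[chunk, ] <- var
+# }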
diff --git a/old/backup/slurm/old/diagnostic_cluster.job b/old/backup/slurm/old/diagnostic_cluster.job
new file mode 100644
index 0000000000000000000000000000000000000000..6a59e9c1a921dae8320b26ba375be1897c36b275
--- /dev/null
+++ b/old/backup/slurm/old/diagnostic_cluster.job
@@ -0,0 +1,28 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -J diagnostic
+#SBATCH -o diagnostic.out
+#SBATCH -e diagnostic.err
+
+# set the maximum execution time of the diagnostic:
+#SBATCH -t 12:00:00
+
+# Set the name of your script to run:
+diagnostic="../weather_regimes_v35.R"
+
+# Jobs to run:
+firstJob=1
+lastJob=12
+
+# Set the maximum number of jobs:
+nCores=4
+
+# run a process (thread) in background for each job:
+for ARG in $(seq $firstJob $lastJob); do
+  Rscript $diagnostic $ARG &
+
+  nThreads=$(($nThreads+1))
+  if [ "$nThreads" -ge $nCores ]; then
+    wait # wait until the first $nCores chunks have finished before executing the next $nCores
+    nThreads=0
+  fi
+done
diff --git a/old/backup/slurm/old/diagnostic_cluster_v2.job b/old/backup/slurm/old/diagnostic_cluster_v2.job
new file mode 100644
index 0000000000000000000000000000000000000000..191a8c554b831320ea3be3f78e894fe565f5e7c8
--- /dev/null
+++ b/old/backup/slurm/old/diagnostic_cluster_v2.job
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+#SBATCH -J diagnostic
+#SBATCH -o diagnostic.out
+#SBATCH -e diagnostic.err
+
+# Set the total number of processors (cores) to allocate to the
+# parallel job. Maximum is 8 cores for Moore, 12 for Amdahl
+# and 20 for Gustafson. Slurm will choose automatically
+# to which of the three clusters to assign the job:
+
+#SBATCH -n 10
+
+# Set the total computation time of the parallel job.
+# Time max is 12 hours (syntax: hours:minutes:seconds):
+
+#SBATCH -t 12:00:00
+
+# Set the name of your script to run:
+# if no path is provided, it is assumed to be in the
+# directory where the sbatch command is executed:
+
+diagnostic="../weather_regimes_v35.R"
+
+# First and last values of each of the three variables
+# of the parallel job. Values must be integers. Each value
+# between the first and last ones (including the extremes)
+# will be assigned to a different parallel job. If your
+# job has fewer than three variables, you can disable one or two
+# of them by setting them to -999 (both varXmin and varXmax):
+
+var1min=8
+var1max=12
+
+var2min=0
+var2max=6
+
+var3min=-999
+var3max=-999
+
+# Set the maximum number of jobs to run in parallel:
+# it should be equal to the number of cores allocated for SLURM
+# above at line #SBATCH -n X, unless the user wants to allocate
+# more cores to run the parallel job in amdahl or gustafson:
+
+nCores=6
+
+# In case the jobs have a high memory peak, you can introduce
+# a time shift between consecutive jobs, delaying the execution
+# of each parallel job by a few seconds/minutes, so that the
+# memory peak is not reached at the same time by all the jobs
+# and consequently more memory is available, for example to run
+# more jobs than normal. The job shift is set below in seconds.
+# Set it to 0 if you don't want to introduce a job shift:
+
+shiftJob=600
+
+# the following lines are used to run the parallel job;
+# the loops always iterate at least once because disabled
+# variables are set to -999, and their values are blanked
+# just before calling the script.
+
+# run a process (thread) in background for each job:
+for seq1 in $(seq $var1min $var1max ); do
+  for seq2 in $(seq $var2min $var2max); do
+    for seq3 in $(seq $var3min $var3max); do
+
+      # blank the disabled (-999) variables before passing them to the script:
+      if [ "$var1min" -eq -999 ]; then seq1=""; fi
+      if [ "$var2min" -eq -999 ]; then seq2=""; fi
+      if [ "$var3min" -eq -999 ]; then seq3=""; fi
+
+      Rscript $diagnostic $seq1 $seq2 $seq3 &
+      sleep $shiftJob
+
+      nThreads=$(($nThreads+1))
+      if [ "$nThreads" -ge $nCores ]; then
+        wait # wait until the first $nCores chunks have finished before executing the next $nCores
+        nThreads=0
+      fi
+    done
+  done
+done
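+# Worked example (hedged): with shiftJob=600 and jobs whose 60 GB memory peak
+# lasts ~10 minutes, job k starts 600*k seconds after the first one, so the
+# peaks of consecutive jobs do not overlap and roughly twice as many jobs fit
+# in the node's RAM.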
diff --git a/old/backup/slurm/old/empty_sbatch.sh b/old/backup/slurm/old/empty_sbatch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2e11413ded8f553ffcf264656453c4625dad9d7e
--- /dev/null
+++ b/old/backup/slurm/old/empty_sbatch.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+Rscript Weather_regimes_v34.R 12 0
+
diff --git a/old/backup/slurm/old/load_netcdf.R b/old/backup/slurm/old/load_netcdf.R
new file mode 100644
index 0000000000000000000000000000000000000000..4d2b0cf30dc326269a535328b7db17f59a030bdf
--- /dev/null
+++ b/old/backup/slurm/old/load_netcdf.R
@@ -0,0 +1,6 @@
+library(s2dverification)
+
+# Load a 1 GB NetCDF-3 file from esnas:
+time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1))
+
+write.table(time, file=paste0("load_netcdf_total_time_",round(time[3],2),"_seconds.txt")) # time[3] is the elapsed time in seconds
diff --git a/old/backup/slurm/old/load_netcdf.job b/old/backup/slurm/old/load_netcdf.job
new file mode 100644
index 0000000000000000000000000000000000000000..f3c603e0f0c35c41541e9e76cf0bbd87ec90a048
--- /dev/null
+++ b/old/backup/slurm/old/load_netcdf.job
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below specifies to assign the job to one of the  #
+# nodes available (currently: moore, amdahl and gustafson); #
+# if you don't have any preference, you can comment it.     #
+# You can check if a node is full with:                     #
+#   squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D   #
+#   %.15R %.5C %.8m"                                        #
+# which also shows how much memory and cores a job uses     #
+#############################################################
+
+#SBATCH -w amdahl
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of the parallel job! (syntax HH:MM:SS):              #
+# (Time max is 78 hours, or 12 h for interactive sessions)  #
+#############################################################
+
+#SBATCH -t 78:00:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# utilized by your jobs during their sequential execution:  #
+#############################################################
+
+#SBATCH --mem-per-cpu 15000
+
+#############################################################
+# Allocate a number of processors (cores) to this job.      #
+# It is also equal to the maximum number of jobs running at #
+# the same time; the jobs exceeding this number will start  #
+# only when the previous jobs have finished computing.      #
+# The maximum number of cores that can be allocated depends #
+# on the cluster: it is 8 cores for Moore, 12 for Amdahl    #
+# and 20 for Gustafson. Slurm will choose automatically to  #
+# which of the three clusters to assign the job. However,   #
+# this number also cannot be higher than the total          #
+# cluster's RAM divided by the RAM allocated to one core    #
+# (rounded down).                                           #
+# Total cluster RAM is 145 / 258 / 264 GB on Moore / Amdahl #
+# / Gustafson, respectively. For example, if you allocated  #
+# 40 GB/core on Moore in the previous line, you can't       #
+# allocate more than 145 / 40 = 3 cores to your job.        #
+#############################################################
+
+#SBATCH -n 1
+
+nCores=1 # same as the number of cores above
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are provided here in case some of them weren't:      #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+diagnostic="./load_netcdf.R"
+
+Rscript $diagnostic
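+# Usage sketch (hedged): submit with 'sbatch load_netcdf.job'; load_netcdf.R
+# then writes a small text file, load_netcdf_total_time_<elapsed>_seconds.txt,
+# with the timing of the 1 GB Load() benchmark.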
diff --git a/old/backup/slurm/old/parallel_almost_old.job b/old/backup/slurm/old/parallel_almost_old.job
new file mode 100644
index 0000000000000000000000000000000000000000..c446e01f97ec36cd58d62cdd4a9fc42f4a601358
--- /dev/null
+++ b/old/backup/slurm/old/parallel_almost_old.job
@@ -0,0 +1,175 @@
+#!/bin/bash
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below specifies to assign the job to one of the  #
+# nodes available (currently: moore, amdahl and gustafson). #
+# The node should be chosen on the basis of the jobs of     #
+# other users already running in the node;                  #
+# if you don't have any preference, you can comment this    #
+# line, so Slurm will assign your jobs to one or more of    #
+# the nodes available.                                      #
+# Note that if your jobs are assigned to different nodes,   #
+# and each job creates one or more output files in the      #
+# /scratch, they will end in the /scratch of different nodes#
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+#   squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D   #
+#   %.15R %.5C %.8m"                                        #
+# which also shows how much memory and cores a job uses     #
+#############################################################
+
+#SBATCH -w gustafson
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of the parallel job (syntax: HH:MM:SS):              #
+# Time max is 96 hours (4 days), or 12 h for interactive    #
+# sessions. If there isn't any queue issue on the chosen    #
+# node, you can leave this value set to 96 hours.           #
+#############################################################
+
+#SBATCH -t 96:00:00
+
+#############################################################
+# Allocate the desired quantity of RAM (in MB) for each     #
+# processor (core). This value corresponds to the peak RAM  #
+# utilized by your jobs during their sequential execution:  #
+#############################################################
+
+#SBATCH --mem-per-cpu 65000
+
+#############################################################
+# Allocate a number of processors (cores) to this job.      #
+# It is also equal to the maximum number of jobs running at #
+# the same time; the jobs exceeding this number will start  #
+# only when the previous jobs have finished computing.      #
+# The maximum number of cores that can be allocated depends #
+# on the cluster: it is 8 cores for Moore, 12 for Amdahl    #
+# and 20 for Gustafson. Slurm will choose automatically to  #
+# which of the three clusters to assign the job. However,   #
+# this number also cannot be higher than the total          #
+# cluster's RAM divided by the RAM allocated to one core    #
+# (rounded down).                                           #
+# Total cluster RAM is 145 / 258 / 264 GB on Moore / Amdahl #
+# / Gustafson, respectively. For example, if you allocated  #
+# 40 GB/core on Moore in the previous line, you can't       #
+# allocate more than 145 / 40 = 3 cores to your job.        #
+#############################################################
+
+#SBATCH -n 4
+
+nCores=4 # same as the number of cores above
+
+#############################################################
+# If your 'nCores' variable is limited by the total RAM,    #
+# and the peak memory use of each job is short compared to  #
+# the total computational time of a single job, you can     #
+# still increase 'nCores' by introducing a time delay       #
+# between jobs: each time you run a new job, the scheduler  #
+# will wait an amount of time determined by the user before #
+# running the next job. In this way, the peak memory use of #
+# the jobs doesn't overlap (i.e: doesn't happen at the same #
+# time), so more jobs can be executed in parallel. However, #
+# you have to disable the --mem-per-cpu option above by     #
+# commenting it out.                                        #
+# Example: each of your jobs lasts ~ 50 min and needs an    #
+# average of 30 GB of RAM. However, for ~10 minutes, it     #
+# needs 60 GB of RAM. This memory peak would usually halve  #
+# the 'nCores' value. However, by setting 'delay' to 600    #
+# (s), each job reaches its memory peak at a different time #
+# from the other jobs, so you can double 'nCores' and run   #
+# more jobs in parallel.                                    #
+#############################################################
+
+delay=0
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are provided here in case some of them weren't:      #
+#############################################################
+
+module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+diagnostic="../weather_regimes_v35.R"
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has fewer than three variables, you can disable one or    #
+# two of them by setting both of them to -999.              #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set var1min = 1 #
+#   and var1max = 12, leaving the others to -999            #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e: for each month from 1980 to 2015, also   #
+#   set var2min = 1980 and var2max = 2015                   #
+#                                                           #
+# To link these variables to your script, just add the      #
+# following lines at the beginning of your script:          #
+#                                                           #
+#   var1 <- as.integer(commandArgs(TRUE)[1])                #
+#   var2 <- as.integer(commandArgs(TRUE)[2])                #
+#   var3 <- as.integer(commandArgs(TRUE)[3])                #
+#                                                           #
+# where var1, var2, var3 are the variables that you want to #
+# change in your script (i.e: 'month', 'year', etc.). If    #
+# you use fewer than 3 variables, add fewer than three      #
+# lines. Note that the variables have to be integers!       #
+#############################################################
+
+var1min=12
+var1max=12
+
+var2min=0
+var2max=6
+
+var3min=-999
+var3max=-999
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables #
+# in groups of jobs at the same time:                       #
+#############################################################
+
+for arg1 in $(seq $var1min $var1max ); do
+  for arg2 in $(seq $var2min $var2max); do
+    for arg3 in $(seq $var3min $var3max); do
+
+      if [ "$var1min" -eq -999 ] && [ "$var1max" -eq -999 ]; then arg1=""; fi
+      if [ "$var2min" -eq -999 ] && [ "$var2max" -eq -999 ]; then arg2=""; fi
+      if [ "$var3min" -eq -999 ] && [ "$var3max" -eq -999 ]; then arg3=""; fi
+
+      Rscript $diagnostic $arg1 $arg2 $arg3 &
+      eval "job_id_${arg1}_${arg2}_${arg3}=$!" # dynamic variable names require 'eval' in bash
+      sleep $delay
+
+      nThreads=$(($nThreads+1))
+      if [ "$nThreads" -ge $nCores ]; then
+        wait # wait until the first $nCores have finished before executing the next $nCores
+        nThreads=0
+      fi
+    done
+  done
+done
+
+############################################################
+# remove any data left in the shared memory /dev/shm/      #
+# before ending the job:                                   #
+############################################################
+
+mpirun rm `ls /dev/shm -la | grep $(whoami) | awk '{ print "/dev/shm/" $9 }'`
diff --git a/old/backup/slurm/old/sbatch.job b/old/backup/slurm/old/sbatch.job
new file mode 100644
index 0000000000000000000000000000000000000000..67ed36fa19fbbf834d8c5eede963c583569b515f
--- /dev/null
+++ b/old/backup/slurm/old/sbatch.job
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+#SBATCH -n 1
+#SBATCH -o output_%J.out
+#SBATCH -e output_%J.err
+#SBATCH -J regimes
+#SBATCH -t 10:00:00
+#SBATCH --mem-per-cpu 50000
+
+Rscript weather_regimes_v34.R 12 0
+
diff --git a/old/backup/slurm/old/sbatch2.job b/old/backup/slurm/old/sbatch2.job
new file mode 100644
index 0000000000000000000000000000000000000000..bdd69f566ff31c033ea66fa731d96808d5d4db64
--- /dev/null
+++ b/old/backup/slurm/old/sbatch2.job
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+#SBATCH -n 1
+#SBATCH -o output_%J.out
+#SBATCH -e output_%J.err
+#SBATCH -J regimes
+#SBATCH -t 10:00:00
+#SBATCH --mem-per-cpu 50000
+
+Rscript weather_regimes_v34.R 11 1
+
diff --git a/old/backup/slurm/old/sbatch3.job b/old/backup/slurm/old/sbatch3.job
new file mode 100644
index 0000000000000000000000000000000000000000..3607f7059cae164d5c1475f7cfee18399cda9e19
--- /dev/null
+++ b/old/backup/slurm/old/sbatch3.job
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+#SBATCH -n 1
+#SBATCH -o output_%J.out
+#SBATCH -e output_%J.err
+#SBATCH -J regimes
+#SBATCH -t 10:00:00
+#SBATCH --mem-per-cpu 50000
+
+Rscript weather_regimes_v34.R 10 2
+
diff --git a/old/backup/slurm/old/sbatch4.job b/old/backup/slurm/old/sbatch4.job
new file mode 100644
index 0000000000000000000000000000000000000000..479eb4c9c4630a98999f65288233a3044fdf6bf4
--- /dev/null
+++ b/old/backup/slurm/old/sbatch4.job
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+#SBATCH -n 1
+#SBATCH -o output_%J.out
+#SBATCH -e output_%J.err
+#SBATCH -J regimes
+#SBATCH -t 10:00:00
+#SBATCH --mem-per-cpu 50000
+
+Rscript weather_regimes_v34.R 9 3
+
diff --git a/old/backup/slurm/old/sbatch5.job b/old/backup/slurm/old/sbatch5.job
new file mode 100644
index 0000000000000000000000000000000000000000..c6a784feafd2ade2933e2ec7207f5542e9127d5e
--- /dev/null
+++ b/old/backup/slurm/old/sbatch5.job
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+#SBATCH -n 1
+#SBATCH -o output_%J.out
+#SBATCH -e output_%J.err
+#SBATCH -J regimes
+#SBATCH -t 10:00:00
+#SBATCH --mem-per-cpu 50000
+
+Rscript weather_regimes_v34.R 8 4
+
diff --git a/old/backup/slurm/old/sbatch6.job b/old/backup/slurm/old/sbatch6.job
new file mode 100644
index 0000000000000000000000000000000000000000..a123b67d5ede4536ecab8ffdd721119ec5b4a3da
--- /dev/null
+++ b/old/backup/slurm/old/sbatch6.job
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+#SBATCH -n 1
+#SBATCH -o output_%J.out
+#SBATCH -e output_%J.err
+#SBATCH -J regimes
+#SBATCH -t 10:00:00
+#SBATCH --mem-per-cpu 50000
+
+Rscript weather_regimes_v34.R 7 5
+
diff --git a/old/backup/slurm/old/sbatch7.job b/old/backup/slurm/old/sbatch7.job
new file mode 100644
index 0000000000000000000000000000000000000000..8f13041a49763767a847dd30302ed53a8e656a3f
--- /dev/null
+++ b/old/backup/slurm/old/sbatch7.job
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+#SBATCH -n 1
+#SBATCH -o output_%J.out
+#SBATCH -e output_%J.err
+#SBATCH -J regimes
+#SBATCH -t 10:00:00
+#SBATCH --mem-per-cpu 50000
+
+Rscript weather_regimes_v34.R 6 6
+
diff --git a/old/backup/slurm/old/test_load.R b/old/backup/slurm/old/test_load.R
new file mode 100644
index 0000000000000000000000000000000000000000..2ee6262dd7564cf5c1305d38ff1e42aa47f9eb47
--- /dev/null
+++ b/old/backup/slurm/old/test_load.R
@@ -0,0 +1,185 @@
+#########################################################################################
+# Sub-seasonal Skill Scores                                                             #
+#########################################################################################
+# you can run it in a sequential way from the terminal of your workstation with the syntax:
+#
+# Rscript SkillScores_v7.R
+#
+# or in parallel on MareNostrum:
+#
+# bsub < SkillScores_v7.job
+#
+# You can also split the data into chunks and run one chunk only, passing the
+# total number of chunks and the chunk number as arguments;
+# i.e: to split the data in 8 chunks and run only the chunk number 3, write:
+#
+# Rscript SkillScores_v7.R 8 3
+#
+# if you don't specify any argument, or you run it from the R interface, it runs all the chunks in a sequential way.
+# Use this last approach if the computational speed is not a problem.
+#
+
+
+
+##########################################################################################
+# Load functions and libraries                                                           #
+##########################################################################################
+
+# load libraries and functions:
+library(s2dverification)
+library(SpecsVerification)
+#library(easyVerification)
+#library(jpeg)
+#library(abind)
+
+# Load function split.array:
+source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R')
+
+##########################################################################################
+# User's settings                                                                        #
+##########################################################################################
+
+# working dir where to put the output maps and files in the /Data and /Maps subdirs:
+workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"
+
+cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles)
It is the name used inside the netCDF files and as suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+yr1 <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (usually January)
+day <- 2 # starting forecast day
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 2 #1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p_value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must be the same values as in objects la and lo)
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with which the script was run from the terminal
+chunk <- as.integer(args[1]) # number of the chunk to run in this script
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+col <- as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1])
+
+sdates.seq <- weekly.seq(yr1,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+# Monthly startdates for the 2014 reforecasts (in the future you can modify it to work for a generic year):
+startdates.monthly<-list()
+startdates.monthly[[1]]<-1:5
+startdates.monthly[[2]]<-6:9
+startdates.monthly[[3]]<-10:13
+startdates.monthly[[4]]<-14:17
+startdates.monthly[[5]]<-18:22
+startdates.monthly[[6]]<-23:26
+startdates.monthly[[7]]<-27:31
+startdates.monthly[[8]]<-32:35
+startdates.monthly[[9]]<-36:39
+startdates.monthly[[10]]<-40:44
+startdates.monthly[[11]]<-45:48
+startdates.monthly[[12]]<-49:52
+
+## extract geographic coordinates; do it only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment
these rows to use function load() below:
+#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind',
+#           obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+#           nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+#lats<-data.hindcast$lat
+#lons<-data.hindcast$lon
+#save(lats,lons,file=paste0(workdir,'/coordinates.RData'))
+#rm(data.hindcast);gc()
+
+# format geographic coordinates to use with PlotEquiMap:
+load(paste0(workdir,'/coordinates.RData'))
+n.lon <- length(lons)
+n.lat <- length(lats)
+n.lonr <- n.lon-ceiling(length(lons[lons<180 & lons > 0]))
+
+#la<-rev(lats)
+#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr])
+
+#n.lat == 1 # debug leftover, commented out (a bare comparison has no effect)
+
+#ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+#var <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(2001,'0101'), storefreq = 'daily', leadtimemax = 365, output = 'lonlat')
+#var <- Load(var = 'z500', exp = list(exp), obs = NULL, paste0(2000,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+for(month in veri.month){
+  #month=1 # for the debug
+  my.startdates <- startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name <- my.month[month] # name given to the period of the selected startdates, i.e: "January"
+  n.startdates <- length(my.startdates) # number of startdates in the selected month
+  n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month
+
+  print(paste0("Computing Skill Scores for ", startdate.name))
+
+  # Merge all selected startdates:
+  hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array
+
+  #my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon))
+  #if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+  #if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon))
+
+  anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+  anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+
+  time <- system.time({for(startdate in my.startdates){
+    pos.startdate <- which(startdate == my.startdates) # count of the number of startdates already loaded + 1
+    my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    # Load reanalysis data:
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+#    anom.rean <- drop(anom.rean)
+#    anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,1,]
+#
+    # Load hindcast data:
+#    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss") || any(my.score.name=="RelDiagr")){
+#      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))
+#      anom.hindcast <- drop(anom.hindcast)
+#      anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,1,]
+#      rm(anom.hindcast, anom.rean)
+#      gc()
+#    }
+#
+    #if(any(my.score.name=="EnsCorr")){
+    #  load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data
+    #  anom.hindcast.mean <- drop(anom.hindcast.mean)
+    #  anom.hindcast.mean.chunk[my.time.interv,,,] <-
anom.hindcast.mean[,,1,]
    #  rm(anom.hindcast.mean)
    #  gc()
    #}
+
+  } # close for on startdate
+  })
+
+
+  #save(time, file=paste0(workdir,'/test_',startdate.name,'_chunk_',chunk,'_time_',time,'.RData'))
+  save(time, file=paste0(workdir,'/test_',startdate.name,'_chunk_',chunk,'_time_',time[3],'.RData'))
+
+} # close for on month
+
diff --git a/old/backup/slurm/parallel.job b/old/backup/slurm/parallel.job new file mode 100644 index 0000000000000000000000000000000000000000..2f1e06d6880db42a1892b3dfadb3130dc8d8391b --- /dev/null +++ b/old/backup/slurm/parallel.job @@ -0,0 +1,283 @@
+#!/bin/bash
+
+#############################################################
+# You can keep the name of your parallel job as it is below #
+# ('parallel') or change it. In any case, only one standard #
+# error and one standard output file will be created, even  #
+# if you run hundreds of jobs, not to fill your directory   #
+# with unnecessary files. All the information you need on   #
+# the jobs is inside these two files, whose file names have #
+# the job number appended as suffix.                        #
+#                                                           #
+# To increase productivity, create an alias in .bashrc      #
+# such as the one below to run this script from everywhere: #
+#                                                           #
+# alias p='sbatch ~/scripts/slurm/parallel.job'             #
+#                                                           #
+# Remember to regularly remove any data left in             #
+# the shared memory (/dev/shm/), because the shared memory  #
+# is limited to a few GB (depending on the node), and when  #
+# it is full, no users can run jobs. You can create         #
+# an alias as the 'clean' alias below and execute it from   #
+# time to time. Run it also after a Load() function of      #
+# s2dverification terminates prematurely.                   #
+#                                                           #
+# clean='find /dev/shm/ -user $(whoami) -exec rm {} \;'     #
+#                                                           #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+# *NODE*      *CORES*   *RAM (GB)*                          #
+# moore          8         144                              #
+# amdahl        12         258                              #
+# gustafson     20         258                              #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more  #
+# nodes, depending on the available resources. If you want  #
+# to run many sequential jobs at the same time, it is better#
+# to run them on the nodes with more resources available    #
+# (cores and memory); for example, if a node has its RAM    #
+# almost full you don't want to risk your jobs being        #
+# cancelled by lack of RAM. Or, you want to save your       #
+# outputs not in the /esnas scratch but in the node's       #
+# /scratch, and you don't want to have them saved in the    #
+# /scratch of different nodes.                              #
+#                                                           #
+# In the past, option --mem-per-cpu allocated the desired   #
+# quantity of RAM for each processor (core). It was disabled#
+# to allow more users to work at the same time, decreasing  #
+# queue times considerably.
#
+#                                                           #
+# On the contrary, the option -m of sinteractive still works#
+# so you can take advantage of it to run an interactive     #
+# session in amdahl or gustafson, by specifying a higher    #
+# RAM amount than moore:                                    #
+# ~> sinteractive -m 200000                                 #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D  #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w amdahl
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of all parallel jobs (syntax: DD-HH:MM:SS):          #
+# You can leave this value as set below (1 week). If your   #
+# job takes more than 1 week to finish, even after          #
+# splitting it into many jobs that run simultaneously, then #
+# it is better not to run it on our cluster but on the SMP  #
+# machine or on MareNostrum instead, because it takes too   #
+# many computational resources that other users cannot      #
+# employ while your jobs are running.                       #
+#############################################################
+
+#SBATCH -t 01:00:00
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e: #
+# it sets the number of sequential jobs that run at the     #
+# same time on the chosen node. Each time a job finishes, a #
+# new job is run, so there will always be this number of    #
+# jobs running (except at the end, when there are fewer jobs#
+# left than this number).                                   #
+#                                                           #
+# This value also cannot be higher than the node's free RAM #
+# divided by the RAM allocated to one core (rounded down).  #
+# Total node RAM is 145 / 258 / 258 GB on Moore / Amdahl    #
+# / Gustafson, respectively. For example, if you allocated  #
+# 40 GB/core on Moore in the previous line, you can't       #
+# allocate more than 145 / 40 = 3 cores to your job.        #
+#                                                           #
+# The Load() function of s2dverification loads data in      #
+# parallel automatically. You have to disable this feature  #
+# by adding option nprocs=1, or you'll use all the cores of #
+# the node even if you reserved fewer cores. The downside is#
+# that loading times of each job will increase. This is the #
+# main disadvantage of running many sequential jobs at the  #
+# same time.                                                #
+#                                                           #
+# Variable 'nCores' below must always be set to the         #
+# SAME VALUE as the number of cores introduced in the line  #
+# #SBATCH -n (it is redundant but this job file needs it)   #
+#                                                           #
+#############################################################
+
+#SBATCH -n 1
+nCores=1
+
+#############################################################
+# if TRUE, run the job with NO external options,            #
+# and in serial mode.
#
+# You also have to set #SBATCH -n 1 manually                #
+#############################################################
+
+serial=FALSE
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+#script="/home/Earth/ncortesi/scripts/SkillScores_v11.R"
+#script="/home/Earth/ncortesi/scripts/WT_v7.R"
+#script="/home/Earth/ncortesi/scripts/WT_drivers_v8.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_maps_v29.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_v43.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_EDPR.R"
script="/shared/earth/Operational/EDPR/weather_regimes_EDPR.R"
+
+#############################################################
+# Be sure to include all the libraries your script needs:   #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has less than three variables, you can disable one or two #
+# of them by leaving them empty in the lines below.         #
+#                                                           #
+# IMPORTANT: All variables must be integers. First empty    #
+# variable must be arg3, and the second must be arg2.       #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set arg1_min = 1#
+#   and arg1_max = 12, leaving the other two blank:         #
+#   arg1_min = 1                                            #
+#   arg1_max = 12                                           #
+#   arg2_min =                                              #
+#   arg2_max =                                              #
+#   arg3_min =                                              #
+#   arg3_max =                                              #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e: for each month from 1980 to 2015, also   #
+#   set arg2_min = 1980 and arg2_max = 2015                 #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script (var1,    #
+# var2 and var3 are placeholders for your own names):       #
+#                                                           #
+# script.arg <- as.integer(commandArgs(TRUE))               #
+# if(length(script.arg) > 0){                               #
+#    var1 <- script.arg[1]                                  #
+#    var2 <- script.arg[2]                                  #
+#    var3 <- script.arg[3]                                  #
+# }                                                         #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+# import sys                                                #
+# script.arg = sys.argv                                     #
+# if len(script.arg) > 1 :                                  #
+#    var1 = int(script.arg[1])                              #
+#    var2 = int(script.arg[2])                              #
+#    var3 = int(script.arg[3])                              #
+#                                                           #
+# where var1, var2 and var3 are the variables that are      #
+# used in the script and that you'd like to run in parallel #
+# (i.e: 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to set 'nCores' to a    #
+# value which is a divisor of the total number of jobs, so  #
+# it will not run a lonely job or a few jobs in the last    #
+# loop. For example: if you have to run 20 jobs, run them in#
+# blocks of 4, 5 or 10 jobs simultaneously                  #
+#                                                           #
+# If, after the jobs have finished, you need to re-run some #
+# of them, just re-run this job file with arg1_min, arg1_max#
+# and the other argX variables corresponding to the values  #
+# of the jobs you need to re-run. In case the values are not#
+# consecutive (i.e: 2, 5 and 11), then set 'nCores'=1 and   #
+# run those jobs in a sequential way, executing this job    #
+# file one time for each different value you need to        #
+# recompute.
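#
+#                                                           #
+# For example (a sketch with hypothetical values): with     #
+# arg1_min=1, arg1_max=2, arg2_min=2000, arg2_max=2001 and  #
+# arg3 left empty, the loop at the bottom of this file      #
+# launches:                                                 #
+#                                                           #
+#   Rscript $script 1 2000 &                                #
+#   Rscript $script 1 2001 &                                #
+#   Rscript $script 2 2000 &                                #
+#   Rscript $script 2 2001 &                                #
+#                                                           #
+# with at most 'nCores' of them running at any given time.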
#
+#############################################################
+
+arg1_min=2017
+arg1_max=2017
+
+arg2_min=2
+arg2_max=2
+
+arg3_min=
+arg3_max=
+
+#############################################################
+#            ! DO NOT MODIFY THE LINES BELOW !              #
+#############################################################
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables #
+# in groups of jobs at the same time:                       #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use instead $! to get the pid of last child process)
+nChildren=0 # set initial number of child processes (background jobs executed by this script)
+
+# if the user left arg2_min or arg3_min empty, fill them with -999 instead
+# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3:
+size2=${#arg2_min} # length of arg2_min
+size3=${#arg3_min} # length of arg3_min
+if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi
+if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi
+
+if [ $serial == 'FALSE' ]; then
+
+# For loops over the three variables to cycle all their values:
+for arg1 in $(seq $arg1_min $arg1_max ); do
+ for arg2 in $(seq $arg2_min $arg2_max); do
+  for arg3 in $(seq $arg3_min $arg3_max); do
+    # disable 'arg2' and/or 'arg3' if the user left them empty:
+    if [ $size2 -eq 0 ]; then arg2=""; fi
+    if [ $size3 -eq 0 ]; then arg3=""; fi
+
+    # detect the language of your script and run a new child job in background
+    if [ ${script: -1} == 'R' ]; then
+       Rscript $script $arg1 $arg2 $arg3 &
+    elif [ ${script: -2} == 'py' ]; then
+       python $script $arg1 $arg2 $arg3 &
+    else
+       $script $arg1 $arg2 $arg3 &
+    fi
+
+    nChildren=$(($nChildren+1)) # increase the number of child processes
+
+    # if there are already jobs running, wait until at least one job has finished before executing the next job:
+    while [ $nChildren -ge $nCores ]
+    do
+       children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+       nChildren=`echo $children | wc -w` # total number of children + parent
+       let nChildren=nChildren-1 # total number of children
+       sleep 1 # pause until at least one job has finished
+    done
+  done
+ done
+done
+
+
+else # run the job in a sequential way:
+
+  if [ ${script: -1} == 'R' ]; then
+     Rscript $script
+  elif [ ${script: -2} == 'py' ]; then
+     python $script
+  else
+     $script
+  fi
+fi
diff --git a/old/backup/slurm/parallel.job~ b/old/backup/slurm/parallel.job~ new file mode 100644 index 0000000000000000000000000000000000000000..53eee0d13c8200a81c624fb828ce634ee7284e6e --- /dev/null +++ b/old/backup/slurm/parallel.job~ @@ -0,0 +1,281 @@
+#!/bin/bash
+
+#############################################################
+# You can keep the name of your parallel job as it is below #
+# ('parallel') or change it. In any case, only one standard #
+# error and one standard output file will be created, even  #
+# if you run hundreds of jobs, not to fill your directory   #
+# with unnecessary files. All the information you need on   #
+# the jobs is inside these two files, whose file names have #
+# the job number appended as suffix.
#
+#                                                           #
+# To increase productivity, create an alias in .bashrc      #
+# such as the one below to run this script from everywhere: #
+#                                                           #
+# alias p='sbatch ~/scripts/slurm/parallel.job'             #
+#                                                           #
+# Remember to regularly remove any data left in             #
+# the shared memory (/dev/shm/), because the shared memory  #
+# is limited to a few GB (depending on the node), and when  #
+# it is full, no users can run jobs. You can create         #
+# an alias as the 'clean' alias below and execute it from   #
+# time to time. Run it also after a Load() function of      #
+# s2dverification terminates prematurely.                   #
+#                                                           #
+# clean='find /dev/shm/ -user $(whoami) -exec rm {} \;'     #
+#                                                           #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+# *NODE*      *CORES*   *RAM (GB)*                          #
+# moore          8         144                              #
+# amdahl        12         258                              #
+# gustafson     20         258                              #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more  #
+# nodes, depending on the available resources. If you want  #
+# to run many sequential jobs at the same time, it is better#
+# to run them on the nodes with more resources available    #
+# (cores and memory); for example, if a node has its RAM    #
+# almost full you don't want to risk your jobs being        #
+# cancelled by lack of RAM. Or, you want to save your       #
+# outputs not in the /esnas scratch but in the node's       #
+# /scratch, and you don't want to have them saved in the    #
+# /scratch of different nodes.                              #
+#                                                           #
+# In the past, option --mem-per-cpu allocated the desired   #
+# quantity of RAM for each processor (core). It was disabled#
+# to allow more users to work at the same time, decreasing  #
+# queue times considerably.                                 #
+#                                                           #
+# On the contrary, the option -m of sinteractive still works#
+# so you can take advantage of it to run an interactive     #
+# session in amdahl or gustafson, by specifying a higher    #
+# RAM amount than moore:                                    #
+# ~> sinteractive -m 200000                                 #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D  #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w amdahl
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of all parallel jobs (syntax: DD-HH:MM:SS):          #
+# You can leave this value as set below (1 week). If your   #
+# job takes more than 1 week to finish, even after          #
+# splitting it into many jobs that run simultaneously, then #
+# it is better not to run it on our cluster but on the SMP  #
+# machine or on MareNostrum instead, because it takes too   #
+# many computational resources that other users cannot      #
+# employ while your jobs are running.                       #
+#############################################################
+
+#SBATCH -t 01:00:00
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e: #
+# it sets the number of sequential jobs that run at the     #
+# same time on the chosen node. Each time a job finishes, a #
+# new job is run, so there will always be this number of    #
+# jobs running (except at the end, when there are fewer jobs#
+# left than this number).
#
+#                                                           #
+# This value also cannot be higher than the node's free RAM #
+# divided by the RAM allocated to one core (rounded down).  #
+# Total node RAM is 145 / 258 / 258 GB on Moore / Amdahl    #
+# / Gustafson, respectively. For example, if you allocated  #
+# 40 GB/core on Moore in the previous line, you can't       #
+# allocate more than 145 / 40 = 3 cores to your job.        #
+#                                                           #
+# The Load() function of s2dverification loads data in      #
+# parallel automatically. You have to disable this feature  #
+# by adding option nprocs=1, or you'll use all the cores of #
+# the node even if you reserved fewer cores. The downside is#
+# that loading times of each job will increase. This is the #
+# main disadvantage of running many sequential jobs at the  #
+# same time.                                                #
+#                                                           #
+# Variable 'nCores' below must always be set to the         #
+# SAME VALUE as the number of cores introduced in the line  #
+# #SBATCH -n (it is redundant but this job file needs it)   #
+#                                                           #
+#############################################################
+
+#SBATCH -n 1
+nCores=1
+
+#############################################################
+# if TRUE, run the job with NO external options,            #
+# and in serial mode.                                       #
+# You also have to set #SBATCH -n 1 manually                #
+#############################################################
+
+serial=FALSE
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+#script="/home/Earth/ncortesi/scripts/SkillScores_v11.R"
+#script="/home/Earth/ncortesi/scripts/WT_v7.R"
+#script="/home/Earth/ncortesi/scripts/WT_drivers_v8.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_maps_v29.R"
+#script="/home/Earth/ncortesi/scripts/weather_regimes_v43.R"
+script="/home/Earth/ncortesi/scripts/weather_regimes_EDPR.R"
+
+#############################################################
+# Be sure to include all the libraries your script needs:   #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has less than three variables, you can disable one or two #
+# of them by leaving them empty in the lines below.         #
+#                                                           #
+# IMPORTANT: All variables must be integers. First empty    #
+# variable must be arg3, and the second must be arg2.
#
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set arg1_min = 1#
+#   and arg1_max = 12, leaving the other two blank:         #
+#   arg1_min = 1                                            #
+#   arg1_max = 12                                           #
+#   arg2_min =                                              #
+#   arg2_max =                                              #
+#   arg3_min =                                              #
+#   arg3_max =                                              #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e: for each month from 1980 to 2015, also   #
+#   set arg2_min = 1980 and arg2_max = 2015                 #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script (var1,    #
+# var2 and var3 are placeholders for your own names):       #
+#                                                           #
+# script.arg <- as.integer(commandArgs(TRUE))               #
+# if(length(script.arg) > 0){                               #
+#    var1 <- script.arg[1]                                  #
+#    var2 <- script.arg[2]                                  #
+#    var3 <- script.arg[3]                                  #
+# }                                                         #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+# import sys                                                #
+# script.arg = sys.argv                                     #
+# if len(script.arg) > 1 :                                  #
+#    var1 = int(script.arg[1])                              #
+#    var2 = int(script.arg[2])                              #
+#    var3 = int(script.arg[3])                              #
+#                                                           #
+# where var1, var2 and var3 are the variables that are      #
+# used in the script and that you'd like to run in parallel #
+# (i.e: 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to set 'nCores' to a    #
+# value which is a divisor of the total number of jobs, so  #
+# it will not run a lonely job or a few jobs in the last    #
+# loop. For example: if you have to run 20 jobs, run them in#
+# blocks of 4, 5 or 10 jobs simultaneously                  #
+#                                                           #
+# If, after the jobs have finished, you need to re-run some #
+# of them, just re-run this job file with arg1_min, arg1_max#
+# and the other argX variables corresponding to the values  #
+# of the jobs you need to re-run. In case the values are not#
+# consecutive (i.e: 2, 5 and 11), then set 'nCores'=1 and   #
+# run those jobs in a sequential way, executing this job    #
+# file one time for each different value you need to        #
+# recompute.                                                #
+#############################################################
+
+arg1_min=2017
+arg1_max=2017
+
+arg2_min=2
+arg2_max=2
+
+arg3_min=
+arg3_max=
+
+#############################################################
+#            ! DO NOT MODIFY THE LINES BELOW !              #
+#############################################################
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables #
+# in groups of jobs at the same time:                       #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use instead $!
to get the pid of last child process)
+nChildren=0 # set initial number of child processes (background jobs executed by this script)
+
+# if the user left arg2_min or arg3_min empty, fill them with -999 instead
+# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3:
+size2=${#arg2_min} # length of arg2_min
+size3=${#arg3_min} # length of arg3_min
+if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi
+if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi
+
+if [ $serial == 'FALSE' ]; then
+
+# For loops over the three variables to cycle all their values:
+for arg1 in $(seq $arg1_min $arg1_max ); do
+ for arg2 in $(seq $arg2_min $arg2_max); do
+  for arg3 in $(seq $arg3_min $arg3_max); do
+    # disable 'arg2' and/or 'arg3' if the user left them empty:
+    if [ $size2 -eq 0 ]; then arg2=""; fi
+    if [ $size3 -eq 0 ]; then arg3=""; fi
+
+    # detect the language of your script and run a new child job in background
+    if [ ${script: -1} == 'R' ]; then
+       Rscript $script $arg1 $arg2 $arg3 &
+    elif [ ${script: -2} == 'py' ]; then
+       python $script $arg1 $arg2 $arg3 &
+    else
+       $script $arg1 $arg2 $arg3 &
+    fi
+
+    nChildren=$(($nChildren+1)) # increase the number of child processes
+
+    # if there are already jobs running, wait until at least one job has finished before executing the next job:
+    while [ $nChildren -ge $nCores ]
+    do
+       children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+       nChildren=`echo $children | wc -w` # total number of children + parent
+       let nChildren=nChildren-1 # total number of children
+       sleep 1 # pause until at least one job has finished
+    done
+  done
+ done
+done
+
+
+else # run the job in a sequential way:
+
+  if [ ${script: -1} == 'R' ]; then
+     Rscript $script
+  elif [ ${script: -2} == 'py' ]; then
+     python $script
+  else
+     $script
+  fi
+fi
diff --git a/old/backup/slurm/parallel.sh b/old/backup/slurm/parallel.sh new file mode 100755 index 0000000000000000000000000000000000000000..9f82dc109ebf67d22c897614dc041db00cc05684 --- /dev/null +++ b/old/backup/slurm/parallel.sh @@ -0,0 +1,11 @@
+#!/bin/bash
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+script="/home/Earth/ncortesi/scripts/test.R"
+
+sbatch /home/Earth/ncortesi/scripts/slurm/parallel.job $script
diff --git a/old/backup/slurm/parallel.sh~ b/old/backup/slurm/parallel.sh~ new file mode 100644 index 0000000000000000000000000000000000000000..133aff160318f3b8ca70380cc3b9f226c50f20c6 --- /dev/null +++ b/old/backup/slurm/parallel.sh~ @@ -0,0 +1,260 @@
+#!/bin/bash
+
+#############################################################
+# To increase productivity, you can add an alias in .bashrc #
+# such as the ones below to edit and run this script from   #
+# everywhere:                                               #
+#                                                           #
+# alias p='vi ~/scripts/slurm/parallel.job'                 #
+# alias pp='sbatch ~/scripts/slurm/parallel.job'            #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+# *NODE*      *CORES*   *RAM (GB)*                          #
+# moore          8         144                              #
+# amdahl        12         258                              #
+# gustafson     20         258                              #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more
nodes, depending on the available resources, but there are#
+# some situations in which you might want to run your jobs  #
+# in the same node, for example when one node has its RAM   #
+# almost full and you don't want to risk your jobs being    #
+# cancelled for a lack of RAM. Or, you want to save your    #
+# outputs not in the /esnas scratch but in the node's       #
+# /scratch, but you don't want to have them saved in the    #
+# /scratch of different nodes.                              #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D     #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w amdahl
+
+#############################################################
+# In the past, this option allocated the desired quantity   #
+# of RAM (in MB) for each processor (core). Now, it has been#
+# disabled for all nodes of our cluster, but it is still    #
+# useful to estimate it just to let you know how many jobs  #
+# you might theoretically run at the same time in the       #
+# cluster. Such a value should be slightly greater than the #
+# peak RAM utilized by one of your jobs during its          #
+# sequential execution.                                     #
+# On the contrary, the option -m of sinteractive still works#
+# so you can take advantage of it to run an interactive     #
+# session in amdahl or gustafson, by specifying a higher    #
+# RAM amount:                                               #
+# sinteractive -m 200000                                    #
+#############################################################
+
+###SBATCH --mem-per-cpu 50000
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of the parallel job (syntax: DD-HH:MM:SS):           #
+# Time max is 96 hours (4 days), or 12 h for interactive    #
+# sessions. If there isn't any queue issue on the chosen    #
+# node, you can leave this value set to 96 hours.           #
+#############################################################
+
+#SBATCH -t 5-23:59:59
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+script="/home/Earth/ncortesi/scripts/weather_regimes_impact_v3.R"
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e: #
+# it sets the maximum number of jobs that can run at        #
+# the same time; the jobs exceeding this number will start  #
+# only when some of the previous jobs have finished.        #
+# The maximum number of cores that can be allocated depends #
+# on the cluster: it is 8 cores for Moore, 12 for Amdahl    #
+# and 20 for Gustafson. However, this                       #
+# number also cannot be higher than the total node's RAM    #
+# divided by the RAM allocated to one core (rounded down).  #
+# Total cluster RAM is 145 / 258 / 258 GB on Moore / Amdahl #
+# / Gustafson, respectively. For example, if you allocated  #
+# 40 GB/core on Moore in the previous line, you can't       #
+# allocate more than 145 / 40 = 3 cores to your job.        #
+#                                                           #
+# Note that the Load() function of s2dverification loads    #
+# data in parallel automatically and as such is not         #
+# affected by the SBATCH -n option unless the option        #
+# 'nprocs=1' is provided inside Load().
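#
+# For instance, a minimal sketch of such a call (the        #
+# dataset name and start date below are placeholders; the   #
+# relevant part here is only 'nprocs = 1'):                 #
+#                                                           #
+# data <- Load('psl', exp = NULL, obs = 'myReanalysis',     #
+#              sdates = '19800101', storefreq = 'daily',    #
+#              output = 'lonlat', nprocs = 1)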
#
+#       ***** IT IS HIGHLY RECOMMENDED TO DO SO *****       #
+# even if it increases the loading times of each job.       #
+# If you don't specify it explicitly when you load the data #
+# in your script, no job will be terminated but all the     #
+# node's cores will be totally used by your Load()'s jobs   #
+# until all data have been loaded.                          #
+#############################################################
+
+#SBATCH -n 7
+nCores=7 # same as the number of cores above
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are provided here in case some weren't loaded:       #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has less than three variables, you can disable one or two #
+# of them by leaving them empty in the lines below.         #
+#                                                           #
+# IMPORTANT: All variables must be integers. First empty    #
+# variable must be var3, and the second must be var2.       #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set var1min = 1 #
+#   and var1max = 12, leaving the other two blank:          #
+#   var1min=1                                               #
+#   var1max=12                                              #
+#   var2min=                                                #
+#   var2max=                                                #
+#   var3min=                                                #
+#   var3max=                                                #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e: for each month from 1980 to 2015, also   #
+#   set var2min = 1980 and var2max = 2015                   #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script (var1,    #
+# var2 and var3 are placeholders for your own names):       #
+#                                                           #
+# script.arg <- as.integer(commandArgs(TRUE))               #
+# if(length(script.arg) > 0){                               #
+#    var1 <- script.arg[1]                                  #
+#    var2 <- script.arg[2]                                  #
+#    var3 <- script.arg[3]                                  #
+# }                                                         #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+# import sys                                                #
+# script.arg = sys.argv                                     #
+# if len(script.arg) > 1 :                                  #
+#    var1 = int(script.arg[1])                              #
+#    var2 = int(script.arg[2])                              #
+#    var3 = int(script.arg[3])                              #
+#                                                           #
+# where var1, var2 and var3 are the variables that are      #
+# used in the script and that you'd like to run in parallel #
+# (i.e: 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to run a number of jobs #
+# which is a multiple of nCores, so it will not need to     #
+# compute a lonely job or a few jobs in the last loop       #
+#############################################################
+
+var1min=12
+var1max=12
+
+var2min=0
+var2max=6
+
+var3min=
+var3max=
+
+#############################################################
+# If your 'nCores' variable is limited by the total RAM,    #
+# and the peak memory use of each job is short compared to  #
+# the total computational time of a single job, you can     #
+# still increase 'nCores' by introducing a time delay       #
+# between jobs: each time you run a new job, the scheduler  #
+# will wait an amount of time determined by the user before #
+# running the next job. In this way, the peak memory uses of#
+# the jobs don't overlap (i.e: don't happen at the same     #
+# time), so more jobs can be executed in parallel.          #
+# Example: each of your jobs lasts ~ 50 min and needs an    #
+# average of 30 GB of RAM. However, for ~10 minutes, it     #
+# needs 60 GB of RAM. This memory peak usually should       #
+# halve 'nCores' value.
However, setting 'delay' to 600 (s),#
+# each job reaches its memory peak at a different time from #
+# the other jobs, enabling you to double 'nCores'.          #
+#############################################################
+
+delay=0
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables #
+# in groups of jobs at the same time:                       #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use instead $! to get the pid of last child process)
+nChildren=0 # set initial number of child processes (background jobs executed by this script)
+
+# if the user left var2min or var3min empty, fill them with -999 instead
+# to be able to run the next for loop with only 1 value for var2 and/or var3:
+size2=${#var2min} # length of var2min
+size3=${#var3min} # length of var3min
+if [ $size2 -eq 0 ] ; then var2min=-999; var2max=-999; fi
+if [ $size3 -eq 0 ] ; then var3min=-999; var3max=-999; fi
+
+# For loops over the three variables to cycle all their values:
+for arg1 in $(seq $var1min $var1max ); do
+ for arg2 in $(seq $var2min $var2max); do
+  for arg3 in $(seq $var3min $var3max); do
+    # disable 'arg2' and/or 'arg3' if the user left them empty:
+    if [ "$var2min" -eq -999 ] && [ "$var2max" -eq -999 ]; then arg2=""; fi
+    if [ "$var3min" -eq -999 ] && [ "$var3max" -eq -999 ]; then arg3=""; fi
+
+    # detect if your script is in R or in python:
+    if [ ${script: -1} == 'R' ]; then
+       Rscript $script $arg1 $arg2 $arg3 & # run a new child job in background
+    fi
+
+    if [ ${script: -2} == 'py' ]; then
+       python $script $arg1 $arg2 $arg3 & # run a new child job in background
+    fi
+
+    nChildren=$(($nChildren+1)) # increase the number of child processes
+    sleep $delay # wait a moment if the user wants to run the following job after some time
+
+    # if there are already jobs running, wait until at least one job has finished before executing the next job:
+    while [ $nChildren -ge $nCores ]
+    do
+       children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+       nChildren=`echo $children | wc -w` # total number of children + parent
+       let nChildren=nChildren-1 # total number of children
+       sleep 1 # pause until at least one job has finished
+    done
+  done
+ done
+done
+
+#############################################################
+# remove any data left in the shared memory                 #
+# /dev/shm/ of the node before ending the job.
#
+#                                                           #
+# You can also create an alias as the one below to do it    #
+# manually, as it should also be used each time a Load()    #
+# command terminates prematurely:                           #
+#                                                           #
+# clean='find /dev/shm/ -user ncortesi -exec rm {} \;'      #
+#############################################################
+
+mpirun rm `ls /dev/shm -la | grep $(whoami) | awk ' { print "/dev/shm/" $9 } '`
diff --git a/old/backup/slurm/parallel_chunk.job b/old/backup/slurm/parallel_chunk.job new file mode 100644 index 0000000000000000000000000000000000000000..c85be7746f4ca83bb28b1008d64cd2458289f81c --- /dev/null +++ b/old/backup/slurm/parallel_chunk.job @@ -0,0 +1,33 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -J diagnostic
+#SBATCH -o diagnostic.out
+#SBATCH -e diagnostic.err
+
+# set the maximum execution time of the diagnostic:
+#SBATCH -t 12:00:00
+
+# Set the name of your script to run:
+diagnostic="weather_regimes_v35.R" #"diagnostic_cluster.R"
+
+# Set the total number of chunks to employ:
+firstChunk=7
+lastChunk=12
+
+# Set the maximum number of jobs:
+nCores=4
+
+# run a process (thread) for each chunk in background:
+nThreads=0
+for ARG in $(seq $firstChunk $lastChunk); do
+  Rscript $diagnostic $ARG &
+  nThreads=$(($nThreads+1))
+  if [ "$nThreads" -ge $nCores ]; then
+     wait # wait until the first $nCores chunks have finished before executing the next $nCores
+     nThreads=0
+  fi
+done
+
+# wait until all chunks have been computed, then in case we are splitting an array in chunks,
+# collect all the results of each chunk and merge them in the file 'diagnostic_output.RData':
+#wait
+#Rscript -e 'firstChunk <- as.integer(commandArgs(TRUE)[1])' -e 'lastChunk <- as.integer(commandArgs(TRUE)[2])' -e 'for(cnk in firstChunk:lastChunk){' -e 'load(paste0(getwd(),"/output_",cnk,".RData"))' -e 'if(cnk==1) output <- array(NA, c(lastChunk-firstChunk+1, length(var)))' -e 'output[cnk,] <- var' -e 'file.remove(paste0(getwd(),"/output_",cnk,".RData"))}' -e 'save(var, file="diagnostic_output.RData")' $firstChunk $lastChunk
diff --git a/old/backup/slurm/parallel_old.job b/old/backup/slurm/parallel_old.job new file mode 100644 index 0000000000000000000000000000000000000000..8cd94a94cbf1595c66cbea9eae01915eb6474014 --- /dev/null +++ b/old/backup/slurm/parallel_old.job @@ -0,0 +1,246 @@
+#!/bin/bash
+
+#############################################################
+# To increase productivity, you can add an alias in .bashrc #
+# such as the one below to run this script from             #
+# everywhere:                                               #
+#                                                           #
+# alias p='~/scripts/slurm/parallel.sh'                     #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+# *NODE*      *CORES*   *RAM (GB)*                          #
+# moore          8         144                              #
+# amdahl        12         258                              #
+# gustafson     20         258                              #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more  #
+# nodes, depending on the available resources, but there are#
+# some situations in which you might want to run your jobs  #
+# in the same node, for example when one node has its RAM   #
+# almost full and you don't want to risk your jobs being    #
+# cancelled for a lack of RAM. Or, you want to save your    #
+# outputs not in the /esnas scratch but in the node's       #
+# /scratch, but you don't want to have them saved in the
#
+# /scratch of different nodes.                              #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D     #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w gustafson
+
+#############################################################
+# In the past, this option allocated the desired quantity   #
+# of RAM (in MB) for each processor (core). Now, it has been#
+# disabled for all nodes of our cluster, but it is still    #
+# useful to estimate it just to let you know how many jobs  #
+# you might theoretically run at the same time in the       #
+# cluster. Such a value should be slightly greater than the #
+# peak RAM utilized by one of your jobs during its          #
+# sequential execution.                                     #
+# On the contrary, the option -m of sinteractive still works#
+# so you can take advantage of it to run an interactive     #
+# session in amdahl or gustafson, by specifying a higher    #
+# RAM amount:                                               #
+# sinteractive -m 200000                                    #
+#############################################################
+
+###SBATCH --mem-per-cpu 50000
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of the parallel job (syntax: DD-HH:MM:SS):           #
+# Time max is 96 hours (4 days), or 12 h for interactive    #
+# sessions. If there isn't any queue issue on the chosen    #
+# node, you can leave this value set to 96 hours.           #
+#############################################################
+
+#SBATCH -t 5-23:59:59
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e: #
+# it sets the maximum number of jobs that can run at        #
+# the same time; the jobs exceeding this number will start  #
+# only when some of the previous jobs have finished.        #
+# The maximum number of cores that can be allocated depends #
+# on the cluster: it is 8 cores for Moore, 12 for Amdahl    #
+# and 20 for Gustafson. However, this                       #
+# number also cannot be higher than the total node's RAM    #
+# divided by the RAM allocated to one core (rounded down).  #
+# Total cluster RAM is 145 / 258 / 258 GB on Moore / Amdahl #
+# / Gustafson, respectively. For example, if you allocated  #
+# 40 GB/core on Moore in the previous line, you can't       #
+# allocate more than 145 / 40 = 3 cores to your job.        #
+#                                                           #
+# Note that if you use the Load() function of               #
+# s2dverification to load your data, you HAVE TO ADD to     #
+# this function the option 'nprocs=1'; if not, the data will#
+# be loaded using all the cores of the node.                #
+# Loading times of each job will increase, but this is the  #
+# main disadvantage of running many sequential jobs at the  #
+# same time.                                                #
+#############################################################
+
+#SBATCH -n 7
+nCores=7 # same as the number of cores above
+
+#############################################################
+# These modules should already be loaded in your session.   #
+# They are provided here in case some weren't loaded:       #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job.
If your job #
+# has less than three variables, you can disable one or two #
+# of them by leaving them empty in the lines below.         #
+#                                                           #
+# IMPORTANT: All variables must be integers. First empty    #
+# variable must be var3, and the second must be var2.       #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set var1min = 1 #
+#   and var1max = 12, leaving the other two blank:          #
+#   var1min=1                                               #
+#   var1max=12                                              #
+#   var2min=                                                #
+#   var2max=                                                #
+#   var3min=                                                #
+#   var3max=                                                #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e: for each month from 1980 to 2015, also   #
+#   set var2min = 1980 and var2max = 2015                   #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script (var1,    #
+# var2 and var3 are placeholders for your own names):       #
+#                                                           #
+# script.arg <- as.integer(commandArgs(TRUE))               #
+# if(length(script.arg) > 0){                               #
+#    var1 <- script.arg[1]                                  #
+#    var2 <- script.arg[2]                                  #
+#    var3 <- script.arg[3]                                  #
+# }                                                         #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+# import sys                                                #
+# script.arg = sys.argv                                     #
+# if len(script.arg) > 1 :                                  #
+#    var1 = int(script.arg[1])                              #
+#    var2 = int(script.arg[2])                              #
+#    var3 = int(script.arg[3])                              #
+#                                                           #
+# where var1, var2 and var3 are the variables that are      #
+# used in the script and that you'd like to run in parallel #
+# (i.e: 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to run a number of jobs #
+# which is a multiple of nCores, so it will not need to     #
+# compute a lonely job or a few jobs in the last loop       #
+#############################################################
+
+var1min=1
+var1max=2
+
+var2min=0
+var2max=6
+
+var3min=
+var3max=
+
+#############################################################
+# If your 'nCores' variable is limited by the total RAM,    #
+# and the peak memory use of each job is short compared to  #
+# the total computational time of a single job, you can     #
+# still increase 'nCores' by introducing a time delay       #
+# between jobs: each time you run a new job, the scheduler  #
+# will wait an amount of time determined by the user before #
+# running the next job. In this way, the peak memory uses of#
+# the jobs don't overlap (i.e: don't happen at the same     #
+# time), so more jobs can be executed in parallel.          #
+# Example: each of your jobs lasts ~ 50 min and needs an    #
+# average of 30 GB of RAM. However, for ~10 minutes, it     #
+# needs 60 GB of RAM. This memory peak usually should       #
+# halve 'nCores' value. However, setting 'delay' to 600 (s),#
+# each job reaches its memory peak at a different time from #
+# the other jobs, enabling you to double 'nCores'.          #
+#############################################################
+
+delay=0
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables #
+# in groups of jobs at the same time:                       #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use instead $!
to get the pid of last child process)
+nChildren=0 # set initial number of child processes (background jobs executed by this script)
+
+# if the user left var2min or var3min empty, fill them with -999 instead
+# to be able to run the next for loop with only 1 value for var2 and/or var3:
+size2=${#var2min} # length of var2min
+size3=${#var3min} # length of var3min
+if [ $size2 -eq 0 ] ; then var2min=-999; var2max=-999; fi
+if [ $size3 -eq 0 ] ; then var3min=-999; var3max=-999; fi
+
+# For loops over the three variables to cycle all their values:
+for arg1 in $(seq $var1min $var1max ); do
+ for arg2 in $(seq $var2min $var2max); do
+  for arg3 in $(seq $var3min $var3max); do
+    # disable 'arg2' and/or 'arg3' if the user left them empty:
+    if [ "$var2min" -eq -999 ] && [ "$var2max" -eq -999 ]; then arg2=""; fi
+    if [ "$var3min" -eq -999 ] && [ "$var3max" -eq -999 ]; then arg3=""; fi
+
+    # detect if your script is in R or in python ($1 is the script path passed by parallel.sh):
+    if [ ${1: -1} == 'R' ]; then
+       Rscript $1 $arg1 $arg2 $arg3 & # run a new child job in background
+    fi
+
+    if [ ${1: -2} == 'py' ]; then
+       python $1 $arg1 $arg2 $arg3 & # run a new child job in background
+    fi
+
+    nChildren=$(($nChildren+1)) # increase the number of child processes
+    sleep $delay # wait a moment if the user wants to run the following job after some time
+
+    # if there are already jobs running, wait until at least one job has finished before executing the next job:
+    while [ $nChildren -ge $nCores ]
+    do
+       children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+       nChildren=`echo $children | wc -w` # total number of children + parent
+       let nChildren=nChildren-1 # total number of children
+       sleep 1 # pause until at least one job has finished
+    done
+  done
+ done
+done
+
+#############################################################
+# remove any data left in the shared memory                 #
+# /dev/shm/ of the node before ending the job.              #
+#                                                           #
+# You can also create an alias as the one below to do it    #
+# manually, as it should also be used each time a Load()    #
+# command terminates prematurely:                           #
+#                                                           #
+# clean='find /dev/shm/ -user ncortesi -exec rm {} \;'      #
+#############################################################
+
+mpirun rm `ls /dev/shm -la | grep $(whoami) | awk ' { print "/dev/shm/" $9 } '`
diff --git a/old/backup/slurm/parallel_old2.job b/old/backup/slurm/parallel_old2.job new file mode 100644 index 0000000000000000000000000000000000000000..a9b3a141e180a5e67f679efbdafa57b3c4b5f094 --- /dev/null +++ b/old/backup/slurm/parallel_old2.job @@ -0,0 +1,263 @@
+#!/bin/bash
+
+#############################################################
+# You can keep the name of your parallel job as it is below #
+# ('parallel') or change it. In any case, only one standard #
+# error and one standard output file will be created, even  #
+# if you run hundreds of jobs, not to fill your directory   #
+# with unnecessary files. All the information you need on   #
+# the jobs is inside these two files, whose file names have #
+# the job number appended as suffix.
#
+#                                                           #
+# To increase productivity, create an alias in .bashrc      #
+# such as the one below to run this script from everywhere: #
+#                                                           #
+# alias p='sbatch ~/scripts/slurm/parallel.job'             #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+# *NODE*      *CORES*   *RAM (GB)*                          #
+# moore          8         144                              #
+# amdahl        12         258                              #
+# gustafson     20         258                              #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more  #
+# nodes, depending on the available resources. If you want  #
+# to run many sequential jobs at the same time, it is better#
+# to run them on the nodes with more resources available    #
+# (cores and memory); for example, if a node has its RAM    #
+# almost full you don't want to risk your jobs being        #
+# cancelled by lack of RAM. Or, you want to save your       #
+# outputs not in the /esnas scratch but in the node's       #
+# /scratch, and you don't want to have them saved in the    #
+# /scratch of different nodes.                              #
+#                                                           #
+# In the past, option --mem-per-cpu allocated the desired   #
+# quantity of RAM for each processor (core). It was disabled#
+# to allow more users to work at the same time, decreasing  #
+# queue times considerably.                                 #
+#                                                           #
+# On the contrary, the option -m of sinteractive still works#
+# so you can take advantage of it to run an interactive     #
+# session in amdahl or gustafson, by specifying a higher    #
+# RAM amount than moore:                                    #
+# ~> sinteractive -m 200000                                 #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D  #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w gustafson
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of all parallel jobs (syntax: DD-HH:MM:SS):          #
+# You can leave this value as set below (1 week). If your   #
+# job takes more than one week to finish, even after        #
+# splitting it into many jobs that run simultaneously, then #
+# it is better not to run it on our cluster but on the SMP  #
+# machine or on MareNostrum instead, because it takes too   #
+# many computational resources.                             #
+#############################################################
+
+#SBATCH -t 7-00:00:00
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+script="/home/Earth/ncortesi/scripts/weather_regimes_impact_v3.R"
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e: #
+# it sets the number of sequential jobs that run at the     #
+# same time on the chosen node. Each time a job finishes, a #
+# new job is run, so there will always be this number of    #
+# jobs running (except at the end, when there are fewer jobs#
+# left than this number).                                   #
+#                                                           #
+# This number also cannot be higher than the node's free RAM#
+# divided by the RAM allocated to one core (rounded down).  #
+# Total node RAM is 145 / 258 / 258 GB on Moore / Amdahl    #
+# / Gustafson, respectively.
+# For example, if you allocated 40 GB/core on Moore in the  #
+# previous line, you can't allocate more than               #
+# 145 / 40 = 3 cores to your job.                           #
+#                                                           #
+# The Load() function of s2dverification loads data in      #
+# parallel automatically. You have to disable this feature  #
+# adding option nprocs=1, or you'll use all the cores of the#
+# node even if you reserved fewer cores. The downside is    #
+# that the loading times of each job will increase.         #
+#############################################################
+
+#SBATCH -n 1
+
+# variable 'nCores' must always be the same as the maximum
+# number of cores introduced in the above line:
+
+nCores=1
+
+#############################################################
+# Be sure to include all the libraries your script needs:   #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has fewer than three variables, you can disable one or    #
+# two of them by leaving them empty in the lines below.     #
+#                                                           #
+# IMPORTANT: All variables must be integers. The first      #
+# empty variable must be arg3, and the second must be arg2. #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set arg1_min = 1#
+#   and arg1_max = 12, leaving the other two blank:         #
+#   arg1_min = 1                                            #
+#   arg1_max = 12                                           #
+#   arg2_min =                                              #
+#   arg2_max =                                              #
+#   arg3_min =                                              #
+#   arg3_max =                                              #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e. for each month from 1980 to 2015, also   #
+#   set arg2_min = 1980 and arg2_max = 2015                 #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script:          #
+#                                                           #
+#   script.arg <- as.integer(commandArgs(TRUE))             #
+#   if(length(script.arg) > 0){                             #
+#     <var1> <- script.arg[1]                               #
+#     <var2> <- script.arg[2]                               #
+#     <var3> <- script.arg[3]                               #
+#   }                                                       #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+#   import sys                                              #
+#   script_arg = sys.argv                                   #
+#   if len(script_arg) > 1 :                                #
+#     <var1> = int(script_arg[1])                           #
+#     <var2> = int(script_arg[2])                           #
+#     <var3> = int(script_arg[3])                           #
+#                                                           #
+# where <var1>, <var2>, <var3> are the variables that are   #
+# used in the script and that you'd like to run in parallel #
+# (i.e. 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to run a number of jobs #
+# which is a multiple of nCores, so the last loop does not  #
+# have to compute a lonely job or just a few jobs.          #
+#############################################################
+
+arg1_min=8
+arg1_max=8
+
+arg2_min=6
+arg2_max=6
+
+arg3_min=
+arg3_max=
+
+#############################################################
+# In the majority of cases, you can simply ignore variable  #
+# 'delay' introduced below and leave it at 0. Only if your  #
+# 'nCores' value is limited by the total memory, and the    #
+# peak memory use of each job is short compared to the      #
+# total computational time of a single job, you can still   #
+# increase 'nCores' by introducing a time delay between     #
+# jobs: each time a new job is run, the scheduler will wait #
+# an amount of time determined by the user before running   #
+# the next job.                                             #
+# In this way, the memory peaks of the jobs don't overlap   #
+# (i.e. they don't happen at the same time), so more jobs   #
+# can be executed in parallel.                              #
+# Example: each of your jobs lasts ~ 50 min and needs an    #
+# average of 30 GB of RAM. However, for ~5 minutes, each job#
+# needs 60 GB of RAM. This memory peak has the effect of    #
+# halving the 'nCores' value. However, setting 'delay'      #
+# to 600 (s), each job reaches its memory peak at a         #
+# different time from the other jobs, enabling you to       #
+# double the 'nCores' value.                                #
+#############################################################
+
+delay=0
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables,#
+# in groups of jobs executed at the same time:              #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use $! instead to get the pid of the last child process)
+nChildren=0 # set initial number of child processes (background jobs executed by this script)
+
+# if the user left arg2_min or arg3_min empty, fill them with -999 instead,
+# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3:
+size2=${#arg2_min} # length of arg2_min
+size3=${#arg3_min} # length of arg3_min
+if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi
+if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi
+
+# For loops over the three variables to cycle through all their values:
+for arg1 in $(seq $arg1_min $arg1_max ); do
+  for arg2 in $(seq $arg2_min $arg2_max); do
+    for arg3 in $(seq $arg3_min $arg3_max); do
+      # disable 'arg2' and/or 'arg3' if the user left them empty:
+      if [ "$arg2_min" -eq -999 ] && [ "$arg2_max" -eq -999 ]; then arg2=""; fi
+      if [ "$arg3_min" -eq -999 ] && [ "$arg3_max" -eq -999 ]; then arg3=""; fi
+
+      # detect if your script is in R or in Python:
+      if [ ${script: -1} == 'R' ]; then
+        Rscript $script $arg1 $arg2 $arg3 & # run a new child job in background
+      fi
+
+      if [ ${script: -2} == 'py' ]; then
+        python $script $arg1 $arg2 $arg3 & # run a new child job in background
+      fi
+
+      nChildren=$(($nChildren+1)) # increase the number of child processes
+      sleep $delay # wait a moment if the user wants to run the following job after some time
+
+      # if enough jobs are already running, wait until at least one has finished before launching the next one:
+      while [ $nChildren -ge $nCores ]
+      do
+        children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+        nChildren=`echo $children | wc -w` # total number of children + parent
+        let nChildren=nChildren-1 # total number of children
+        sleep 1 # pause until at least one job has finished
+      done
+    done
+  done
+done
+
+#############################################################
+# remove any data left in the shared memory /dev/shm/ of    #
+# the node before ending the job.                           #
+#                                                           #
+# You can also create an alias like the one below to do it  #
+# manually; it should also be used each time a Load()       #
+# command terminates before finishing:                      #
+#                                                           #
+# clean='find /dev/shm/ -user ncortesi -exec rm {} \;'      #
+#############################################################
+
+mpirun rm `ls /dev/shm -la | grep $(whoami) | awk ' { print "/dev/shm/" $9 } '`
diff --git a/old/backup/slurm/parallel_old3.job b/old/backup/slurm/parallel_old3.job
new file mode 100644
index 0000000000000000000000000000000000000000..06e2e0be3764b22b27cd0e38a3199ab2f3ce9426
--- /dev/null
+++ b/old/backup/slurm/parallel_old3.job
@@ -0,0 +1,273 @@
+#!/bin/bash
+
+#############################################################
+# You can keep the name of your parallel job as it is below #
+# ('parallel') or change it. In any case, only one standard #
+# error and one standard output file will be created, even  #
+# if you run hundreds of jobs, so as not to fill your       #
+# directory with unnecessary files. All the information you #
+# need on the jobs is inside these two files, whose file    #
+# names have the job number appended as suffix.             #
+#                                                           #
+# To increase productivity, create an alias in .bashrc      #
+# such as the one below to run this script from everywhere: #
+#                                                           #
+# alias p='sbatch ~/scripts/slurm/parallel.job'             #
+#                                                           #
+# Remember to regularly remove any data loaded in the       #
+# shared memory (/dev/shm/), because the shared memory      #
+# is limited to a few GB (depending on the node), and when  #
+# it is full, no user can run jobs. You can create an alias #
+# like the 'clean' alias below and execute it from time to  #
+# time. Run it also whenever a Load() call of               #
+# s2dverification terminates before finishing.              #
+#                                                           #
+# clean='find /dev/shm/ -user ncortesi -exec rm {} \;'      #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+#   *NODE*       *CORES*   *RAM (GB)*                       #
+#   moore           8         144                           #
+#   amdahl         12         258                           #
+#   gustafson      20         258                           #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more  #
+# nodes, depending on the available resources. If you want  #
+# to run many sequential jobs at the same time, it is better#
+# to run them on the nodes with more resources available    #
+# (cores and memory); for example, if a node has its RAM    #
+# almost full, you don't want to risk your jobs being       #
+# cancelled for lack of RAM. Or you may want to save your   #
+# outputs not in the /esnas scratch but in the node's       #
+# /scratch, and you don't want to have them saved in the    #
+# /scratch of different nodes.                              #
+#                                                           #
+# In the past, option --mem-per-cpu allocated the desired   #
+# quantity of RAM for each processor (core). It was disabled#
+# to allow more users to work at the same time, decreasing  #
+# queue times considerably.                                 #
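+# (As an illustration only, since it is disabled here: the  #
+# RAM was requested per core in MB, e.g.                    #
+# '#SBATCH --mem-per-cpu=40000' for 40 GB per core.)        #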
+#                                                           #
+# On the contrary, the -m option of sinteractive still      #
+# works, so you can take advantage of it to run an          #
+# interactive session in amdahl or gustafson, by specifying #
+# a higher RAM amount than moore:                           #
+# ~> sinteractive -m 200000                                 #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D  #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w gustafson
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of all parallel jobs (syntax: DD-HH:MM:SS).          #
+# You can leave this value as set below (3 days). If your   #
+# job takes more than 3 days to finish, even after          #
+# splitting it into many jobs that run simultaneously, then #
+# it is better not to run it on our cluster but on the SMP  #
+# machine or on MareNostrum instead, because it takes too   #
+# many computational resources that other users cannot      #
+# employ while your jobs are running.                       #
+#############################################################
+
+#SBATCH -t 3-00:00:00
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e. #
+# set the number of sequential jobs that run at the same    #
+# time on the chosen node. Each time a job finishes, a new  #
+# job is run, so there will always be this number of jobs   #
+# running (except at the end, when there are fewer jobs     #
+# left than this number).                                   #
+#                                                           #
+# This number also cannot be higher than the node's free    #
+# RAM divided by the RAM allocated to one core (rounded     #
+# down); total RAM is about 144 / 258 / 258 GB on Moore /   #
+# Amdahl / Gustafson, respectively. For example, if you     #
+# allocated 40 GB/core on Moore in the previous line, you   #
+# can't allocate more than 145 / 40 = 3 cores to your job.  #
+#                                                           #
+# The Load() function of s2dverification loads data in      #
+# parallel automatically. You have to disable this feature  #
+# adding option nprocs=1, or you'll use all the cores of the#
+# node even if you reserved fewer cores. The downside is    #
+# that the loading times of each job will increase.         #
+#                                                           #
+#############################################################
+
+#SBATCH -n 4
+
+# variable 'nCores' must always be the same as the maximum
+# number of cores introduced in the above line:
+
+nCores=4
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+script="/home/Earth/fullano/my.script.R"
+
+#############################################################
+# Be sure to include all the libraries your script needs:   #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has fewer than three variables, you can disable one or    #
+# two of them by leaving them empty in the lines below.     #
+#                                                           #
+# IMPORTANT: All variables must be integers.                #
+# The first empty variable must be arg3, and the second     #
+# must be arg2.                                             #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set arg1_min = 1#
+#   and arg1_max = 12, leaving the other two blank:         #
+#   arg1_min = 1                                            #
+#   arg1_max = 12                                           #
+#   arg2_min =                                              #
+#   arg2_max =                                              #
+#   arg3_min =                                              #
+#   arg3_max =                                              #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e. for each month from 1980 to 2015, also   #
+#   set arg2_min = 1980 and arg2_max = 2015                 #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script:          #
+#                                                           #
+#   script.arg <- as.integer(commandArgs(TRUE))             #
+#   if(length(script.arg) > 0){                             #
+#     <var1> <- script.arg[1]                               #
+#     <var2> <- script.arg[2]                               #
+#     <var3> <- script.arg[3]                               #
+#   }                                                       #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+#   import sys                                              #
+#   script_arg = sys.argv                                   #
+#   if len(script_arg) > 1 :                                #
+#     <var1> = int(script_arg[1])                           #
+#     <var2> = int(script_arg[2])                           #
+#     <var3> = int(script_arg[3])                           #
+#                                                           #
+# where <var1>, <var2>, <var3> are the variables that are   #
+# used in the script and that you'd like to run in parallel #
+# (i.e. 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to set 'nCores' to a    #
+# divisor of the total number of jobs, so the last loop     #
+# will not run a lonely job or just a few jobs.             #
+# For example: if you have to run 20 jobs, run them in      #
+# blocks of 4, 5 or 10 jobs simultaneously.                 #
+#                                                           #
+# If, after the jobs have finished, you need to re-run some #
+# of them, just re-run this job file with arg1_min, arg1_max#
+# and the other argX variables corresponding to the values  #
+# of the jobs you need to re-run. In case the values are not#
+# consecutive (e.g. 2, 5 and 11), then set 'nCores'=1 and   #
+# run those jobs in a sequential way, executing this job    #
+# file one time for each different value you need to        #
+# recompute.                                                #
+#############################################################
+
+arg1_min=1
+arg1_max=12
+
+arg2_min=1
+arg2_max=5
+
+arg3_min=
+arg3_max=
+
+#############################################################
+# In the majority of cases, you can simply ignore variable  #
+# 'delay' introduced below and leave it at 0. Only if your  #
+# 'nCores' value is limited by the total memory, and the    #
+# peak memory use of each job is short compared to the      #
+# total computational time of a single job, you can still   #
+# increase 'nCores' by introducing a time delay between     #
+# jobs: each time a new job is run, the scheduler will wait #
+# an amount of time determined by the user before running   #
+# the next job. In this way, the memory peaks of the jobs   #
+# don't overlap (i.e. they don't happen at the same time),  #
+# so more jobs can be executed in parallel.                 #
+# Example: each of your jobs lasts ~ 50 min and needs an    #
+# average of 30 GB of RAM. However, for ~5 minutes, each job#
+# needs 60 GB of RAM. This memory peak has the effect of    #
+# halving the 'nCores' value. However, setting 'delay'      #
+# to 600 (s), each job reaches its memory peak at a         #
+# different time from the other jobs, enabling you to       #
+# double the 'nCores' value.                                #
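+# (A worked sketch of that arithmetic, assuming ~258 GB of  #
+# free RAM: with overlapping 60 GB peaks, only              #
+# floor(258/60) = 4 jobs fit; staggering the peaks with     #
+# 'delay' so that at most one job is at its 60 GB peak at a #
+# time allows about floor((258-60)/30) + 1 = 7 jobs.)       #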
+#############################################################
+
+delay=0
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables,#
+# in groups of jobs executed at the same time:              #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use $! instead to get the pid of the last child process)
+nChildren=0 # set initial number of child processes (background jobs executed by this script)
+
+# if the user left arg2_min or arg3_min empty, fill them with -999 instead,
+# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3:
+size2=${#arg2_min} # length of arg2_min
+size3=${#arg3_min} # length of arg3_min
+if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi
+if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi
+
+# For loops over the three variables to cycle through all their values:
+for arg1 in $(seq $arg1_min $arg1_max ); do
+  for arg2 in $(seq $arg2_min $arg2_max); do
+    for arg3 in $(seq $arg3_min $arg3_max); do
+      # disable 'arg2' and/or 'arg3' if the user left them empty:
+      if [ $size2 -eq 0 ]; then arg2=""; fi
+      if [ $size3 -eq 0 ]; then arg3=""; fi
+
+      # detect the language of your script and run a new child job in background:
+      if [ ${script: -1} == 'R' ]; then
+        Rscript $script $arg1 $arg2 $arg3 &
+      elif [ ${script: -2} == 'py' ]; then
+        python $script $arg1 $arg2 $arg3 &
+      else
+        $script $arg1 $arg2 $arg3 &
+      fi
+
+      nChildren=$(($nChildren+1)) # increase the number of child processes
+      sleep $delay # wait a moment if the user wants to run the following job after some time
+
+      # if enough jobs are already running, wait until at least one has finished before launching the next one:
+      while [ $nChildren -ge $nCores ]
+      do
+        children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+        nChildren=`echo $children | wc -w` # total number of children + parent
+        let nChildren=nChildren-1 # total number of children
+        sleep 1 # pause until at least one job has finished
+      done
+    done
+  done
+done
+
diff --git a/old/backup/slurm/parallel_test.job b/old/backup/slurm/parallel_test.job
new file mode 100644
index 0000000000000000000000000000000000000000..87b97cef6f73ff9648b77c547dbcb892c50022ea
--- /dev/null
+++ b/old/backup/slurm/parallel_test.job
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+#SBATCH -w amdahl
+#SBATCH -t 3-00:00:00
+#SBATCH -n 4
+
+script="/home/Earth/ncortesi/scripts/test.R"
+
+
+# variable 'nCores' must always be the same as the maximum
+# number of cores introduced in the above line:
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+# Run the test script 4 times in background, one job per index:
+
+for i in `seq 4`; do
+  Rscript $script $i &
+done
+wait # keep the allocation alive until all background jobs have finished
+
diff --git a/old/backup/slurm/parallel_test.job~ b/old/backup/slurm/parallel_test.job~
new file mode 100644
index 0000000000000000000000000000000000000000..95a487aa976632241fd9dd32d0bf3d89268c2b90
--- /dev/null
+++ b/old/backup/slurm/parallel_test.job~
@@ -0,0 +1,274 @@
+#!/bin/bash
+
+#############################################################
+# You can keep the name of your parallel job as it is below #
+# ('parallel') or change it. In any case, only one standard #
+# error and one standard output file will be created, even  #
+# if you run hundreds of jobs, so as not to fill your       #
+# directory with unnecessary files.                         #
+# All the information you need on the jobs is inside these  #
+# two files, whose file names have the job number appended  #
+# as suffix.                                                #
+#                                                           #
+# To increase productivity, create an alias in .bashrc      #
+# such as the one below to run this script from everywhere: #
+#                                                           #
+# alias p='sbatch ~/scripts/slurm/parallel.job'             #
+#                                                           #
+# Remember to regularly remove any data loaded in the       #
+# shared memory (/dev/shm/), because the shared memory      #
+# is limited to a few GB (depending on the node), and when  #
+# it is full, no user can run jobs. You can create an alias #
+# like the 'clean' alias below and execute it from time to  #
+# time. Run it also whenever a Load() call of               #
+# s2dverification terminates before finishing.              #
+#                                                           #
+# clean='find /dev/shm/ -user ncortesi -exec rm {} \;'      #
+#############################################################
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+
+#############################################################
+# The line below assigns your parallel jobs to the three    #
+# nodes of our cluster:                                     #
+#                                                           #
+#   *NODE*       *CORES*   *RAM (GB)*                       #
+#   moore           8         144                           #
+#   amdahl         12         258                           #
+#   gustafson      20         258                           #
+#                                                           #
+# SLURM automatically distributes your jobs to one or more  #
+# nodes, depending on the available resources. If you want  #
+# to run many sequential jobs at the same time, it is better#
+# to run them on the nodes with more resources available    #
+# (cores and memory); for example, if a node has its RAM    #
+# almost full, you don't want to risk your jobs being       #
+# cancelled for lack of RAM. Or you may want to save your   #
+# outputs not in the /esnas scratch but in the node's       #
+# /scratch, and you don't want to have them saved in the    #
+# /scratch of different nodes.                              #
+#                                                           #
+# In the past, option --mem-per-cpu allocated the desired   #
+# quantity of RAM for each processor (core). It was disabled#
+# to allow more users to work at the same time, decreasing  #
+# queue times considerably.                                 #
+#                                                           #
+# On the contrary, the -m option of sinteractive still      #
+# works, so you can take advantage of it to run an          #
+# interactive session in amdahl or gustafson, by specifying #
+# a higher RAM amount than moore:                           #
+# ~> sinteractive -m 200000                                 #
+#                                                           #
+# You can check if a node is full with 'squeue' or with:    #
+# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D  #
+#    %.15R %.5C %.8m"                                       #
+# which also shows how much memory and cores a job is using #
+#############################################################
+
+#SBATCH -w amdahl
+
+#############################################################
+# Set the maximum computation time of the parallel job.     #
+# It has to be higher than the estimated computation        #
+# time of all parallel jobs (syntax: DD-HH:MM:SS).          #
+# You can leave this value as set below (3 days). If your   #
+# job takes more than 3 days to finish, even after          #
+# splitting it into many jobs that run simultaneously, then #
+# it is better not to run it on our cluster but on the SMP  #
+# machine or on MareNostrum instead, because it takes too   #
+# many computational resources that other users cannot      #
+# employ while your jobs are running.                       #
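+# (e.g. '#SBATCH -t 1-12:00:00' requests 1 day + 12 hours.) #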
+#############################################################
+
+#SBATCH -t 3-00:00:00
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+script="/home/Earth/ncortesi/scripts/SkillScores_v10.R"
+#script="/home/Earth/ncortesi/scripts/test.R"
+
+#############################################################
+# Allocate a number of processors (cores) to this job, i.e. #
+# set the number of sequential jobs that run at the same    #
+# time on the chosen node. Each time a job finishes, a new  #
+# job is run, so there will always be this number of jobs   #
+# running (except at the end, when there are fewer jobs     #
+# left than this number).                                   #
+#                                                           #
+# This number also cannot be higher than the node's free    #
+# RAM divided by the RAM allocated to one core (rounded     #
+# down); total RAM is about 144 / 258 / 258 GB on Moore /   #
+# Amdahl / Gustafson, respectively. For example, if you     #
+# allocated 40 GB/core on Moore in the previous line, you   #
+# can't allocate more than 145 / 40 = 3 cores to your job.  #
+#                                                           #
+# The Load() function of s2dverification loads data in      #
+# parallel automatically. You have to disable this feature  #
+# adding option nprocs=1, or you'll use all the cores of the#
+# node even if you reserved fewer cores. The downside is    #
+# that the loading times of each job will increase.         #
+#                                                           #
+#############################################################
+
+#SBATCH -n 1
+
+# variable 'nCores' must always be the same as the maximum
+# number of cores introduced in the above line:
+
+nCores=1
+
+#############################################################
+# Be sure to include all the libraries your script needs:   #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (including the extremes)  #
+# will be assigned to a different parallel job. If your job #
+# has fewer than three variables, you can disable one or    #
+# two of them by leaving them empty in the lines below.     #
+#                                                           #
+# IMPORTANT: All variables must be integers. The first      #
+# empty variable must be arg3, and the second must be arg2. #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times changing    #
+#   only the variable "month" from 1 to 12, set arg1_min = 1#
+#   and arg1_max = 12, leaving the other two blank:         #
+#   arg1_min = 1                                            #
+#   arg1_max = 12                                           #
+#   arg2_min =                                              #
+#   arg2_max =                                              #
+#   arg3_min =                                              #
+#   arg3_max =                                              #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, i.e. for each month from 1980 to 2015, also   #
+#   set arg2_min = 1980 and arg2_max = 2015                 #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script:          #
+#                                                           #
+#   script.arg <- as.integer(commandArgs(TRUE))             #
+#   if(length(script.arg) > 0){                             #
+#     <var1> <- script.arg[1]                               #
+#     <var2> <- script.arg[2]                               #
+#     <var3> <- script.arg[3]                               #
+#   }                                                       #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+#   import sys                                              #
+#   script_arg = sys.argv                                   #
+#   if len(script_arg) > 1 :                                #
+#     <var1> = int(script_arg[1])                           #
+#     <var2> = int(script_arg[2])                           #
+#     <var3> = int(script_arg[3])                           #
+#                                                           #
+# where <var1>, <var2>, <var3> are the variables that are   #
+# used in the script and that you'd like to run in parallel #
+# (i.e. 'month', 'year', 'leadtime', etc.).                 #
+#                                                           #
+# To optimize computation time, try to set 'nCores' to a    #
+# divisor of the total number of jobs, so the last loop     #
+# will not run a lonely job or just a few jobs.             #
+# For example: if you have to run 20 jobs, run them in      #
+# blocks of 4, 5 or 10 jobs simultaneously.                 #
+#                                                           #
+# If, after the jobs have finished, you need to re-run some #
+# of them, just re-run this job file with arg1_min, arg1_max#
+# and the other argX variables corresponding to the values  #
+# of the jobs you need to re-run. In case the values are not#
+# consecutive (e.g. 2, 5 and 11), then set 'nCores'=1 and   #
+# run those jobs in a sequential way, executing this job    #
+# file one time for each different value you need to        #
+# recompute.                                                #
+#############################################################
+
+arg1_min=6
+arg1_max=6
+
+arg2_min=
+arg2_max=
+
+arg3_min=
+arg3_max=
+
+#############################################################
+# In the majority of cases, you can simply ignore variable  #
+# 'delay' introduced below and leave it at 0. Only if your  #
+# 'nCores' value is limited by the total memory, and the    #
+# peak memory use of each job is short compared to the      #
+# total computational time of a single job, you can still   #
+# increase 'nCores' by introducing a time delay between     #
+# jobs: each time a new job is run, the scheduler will wait #
+# an amount of time determined by the user before running   #
+# the next job. In this way, the memory peaks of the jobs   #
+# don't overlap (i.e. they don't happen at the same time),  #
+# so more jobs can be executed in parallel.                 #
+# Example: each of your jobs lasts ~ 50 min and needs an    #
+# average of 30 GB of RAM. However, for ~5 minutes, each job#
+# needs 60 GB of RAM. This memory peak has the effect of    #
+# halving the 'nCores' value. However, setting 'delay'      #
+# to 600 (s), each job reaches its memory peak at a         #
+# different time from the other jobs, enabling you to       #
+# double the 'nCores' value.                                #
+#############################################################
+
+delay=0
+
+#############################################################
+# the following lines run a process (thread) in background  #
+# for each different set of values of the 3 above variables,#
+# in groups of jobs executed at the same time:              #
+#############################################################
+
+bash_pid=$$ # get the pid of the parent job (use $! instead to get the pid of the last child process)
+nChildren=0 # set initial number of child processes (background jobs executed by this script)
+
+# if the user left arg2_min or arg3_min empty, fill them with -999 instead,
+# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3:
+size2=${#arg2_min} # length of arg2_min
+size3=${#arg3_min} # length of arg3_min
+if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi
+if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi
+
+# For loops over the three variables to cycle through all their values:
+for arg1 in $(seq $arg1_min $arg1_max ); do
+  for arg2 in $(seq $arg2_min $arg2_max); do
+    for arg3 in $(seq $arg3_min $arg3_max); do
+      # disable 'arg2' and/or 'arg3' if the user left them empty:
+      if [ $size2 -eq 0 ]; then arg2=""; fi
+      if [ $size3 -eq 0 ]; then arg3=""; fi
+
+      # detect the language of your script and run a new child job in background:
+      if [ ${script: -1} == 'R' ]; then
+        Rscript $script $arg1 $arg2 $arg3 &
+      elif [ ${script: -2} == 'py' ]; then
+        python $script $arg1 $arg2 $arg3 &
+      else
+        $script $arg1 $arg2 $arg3 &
+      fi
+
+      nChildren=$(($nChildren+1)) # increase the number of child processes
+      sleep $delay # wait a moment if the user wants to run the following job after some time
+
+      # if enough jobs are already running, wait until at least one has finished before launching the next one:
+      while [ $nChildren -ge $nCores ]
+      do
+        children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent
+        nChildren=`echo $children | wc -w` # total number of children + parent
+        let nChildren=nChildren-1 # total number of children
+        sleep 1 # pause until at least one job has finished
+      done
+    done
+  done
+done
+
diff --git a/old/backup/slurm/quick.sh b/old/backup/slurm/quick.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0c72753839b6bcd9665f0f6049ad0815f43c2216
--- /dev/null
+++ b/old/backup/slurm/quick.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# from the terminal, execute this job script with:
+#
+# sbatch -n 1 -t 48:00:00 -w amdahl ~/scripts/slurm/quick.sh
+#
+
+#SBATCH -J quick
+#SBATCH -o quick-%J.out
+#SBATCH -e quick-%J.err
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+# insert here your R script to run:
+script="/home/Earth/ncortesi/scripts/weather_regimes_v38.R"
+
+Rscript $script
+
+
diff --git a/old/backup/slurm/quick.sh~ b/old/backup/slurm/quick.sh~
new file mode 100644
index 0000000000000000000000000000000000000000..68044d3720ddbe3a3996ef6add6904e573918cf4
--- /dev/null
+++ b/old/backup/slurm/quick.sh~
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# from the terminal, execute this job script with:
+#
+# sbatch -n 1 -t 48:00:00 -J parallel -w amdahl ~/scripts/slurm/quick.sh
+#
+
+# insert here your R script to run:
+script="/home/Earth/ncortesi/scripts/weather_regimes_v38.R"
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
diff --git a/old/backup/slurm/tirar.sh b/old/backup/slurm/tirar.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c03260a8153d2622dad6aa1311d427dc1f4e705b
--- /dev/null
+++ b/old/backup/slurm/tirar.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+#SBATCH -t 7-00:00:00
+
+#SBATCH -w amdahl
+#SBATCH -n 5
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+script="/home/Earth/fullano/.../diagnostic.R"
+
+arg1_min=1
+arg1_max=12
+
+arg2_min=1
+arg2_max=5
+
+arg3_min=
+arg3_max=
+
+# The notes below are R/Python template snippets, not bash; they are kept
+# inside a no-op heredoc so this file stays a valid shell script:
+: <<'TEMPLATE_NOTES'
+
+  start_date = 1:12
+  lead_time = 1:5
+
+  ...
+
+  script.arg <- as.integer(commandArgs(TRUE))
+
+  if(length(script.arg) > 0){
+    start_date <- script.arg[1]
+    lead_time <- script.arg[2]
+  }
+
+  for (sd in start_date){
+    for (lt in lead_time){
+      ######################
+      # Your analysis here #
+      ######################
+
+    }
+  }
+
+  save(output_1, ... , output_N, file=paste0(workdir,"/my_analysis.RData"))
+
+  save(output_1, ... , output_N, file=paste0(workdir,
+       "/my_analysis_start_date_",sd,"_lead_time_",lt,".RData"))
+
+  import sys
+  script_arg = sys.argv
+
+  if len(script_arg) > 1 :
+    start_date = int(script_arg[1])
+    lead_time = int(script_arg[2])
+
+TEMPLATE_NOTES
+
diff --git a/old/backup/slurm/tirar.sh~ b/old/backup/slurm/tirar.sh~
new file mode 100644
index 0000000000000000000000000000000000000000..c001eae6c234c256b9b8184cf2fe29b2f8757199
--- /dev/null
+++ b/old/backup/slurm/tirar.sh~
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+#SBATCH -t 5-23:59:59
+
+#SBATCH -w amdahl
+#SBATCH -n 5
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+script="/home/Earth/fullano/scripts/diagnostic.R"
+
+var1min=1
+var1max=12
+
+var2min=1
+var2max=5
+
+var3min=
+var3max=
+
+# The notes below are R/Python template snippets, not bash; they are kept
+# inside a no-op heredoc so this file stays a valid shell script:
+: <<'TEMPLATE_NOTES'
+
+  start_date = 1:12
+  lead_time = 1:5
+
+  ...
+
+  script.arg <- as.integer(commandArgs(TRUE))
+
+  if(length(script.arg) > 0){
+    start_date <- script.arg[1]
+    lead_time <- script.arg[2]
+  }
+
+  for (sd in start_date){
+    for (lt in lead_time){
+      ######################
+      # Your analysis here #
+      ######################
+
+    }
+  }
+
+  save(output_1, ... , output_N, file=paste0(workdir,"/my_analysis.RData"))
+
+  save(output_1, ... , output_N, file=paste0(workdir,
+       "/my_analysis_start_date_",sd,"_lead_time_",lt,".RData"))
+
+  import sys
+  script_arg = sys.argv
+
+  if len(script_arg) > 1 :
+    start_date = int(script_arg[1])
+    lead_time = int(script_arg[2])
+
+TEMPLATE_NOTES
+
diff --git a/old/backup/taylor.R b/old/backup/taylor.R
new file mode 100644
index 0000000000000000000000000000000000000000..2c7d02777afbd0b98cd296a6a688f11fd79f687b
--- /dev/null
+++ b/old/backup/taylor.R
@@ -0,0 +1,216 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: Taylor diagram (modified from function taylor.diagram of package Plotrix to have the same colors as Nube's Taylor diagram).
+# You can also specify a text label for each point (option 'my.text') and color each point proportionally to its bias (option 'BIAS').
+# Assumptions: same as in function taylor.diagram()
+# Branch: general
+
+my.taylor<-function (ref, model, add = FALSE, col = "red", pch = 19, pos.cor = TRUE,
+    xlab = "", ylab = "", main = "Taylor Diagram", show.gamma = TRUE,
+    ngamma = 3, gamma.col = "darkgreen", sd.arcs = 0, ref.sd = FALSE, sd.method = "sample",
+    grad.corr.lines = c(0.2, 0.4, 0.6, 0.8, 0.9), pcex = 1, cex.axis = 1,
+    normalize = FALSE, mar = c(5, 4, 6, 6), BIAS = FALSE, my.text = NULL, text.cex = pcex, ...)
+{
+    grad.corr.full <- c(0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99, 1)
+    R <- cor(ref, model, use = "na.or.complete")
+
+    if(BIAS==TRUE){
+      bias.value <- mean(model-ref,na.rm=TRUE) # mean bias of the model
+      my.equidist<-c(-75,-35,-15,-5,5,15,35,75)
+      my.colors<-c("magenta4","blue4","steelblue","skyblue2","orange","orangered","red","red4")
+      my.col<-my.colors[which.min(abs(my.equidist-bias.value))] # pick the color class closest to the bias
+    }
+
+    if (is.list(ref))
+        ref <- unlist(ref)
+    if (is.list(model))
+        model <- unlist(model)
+    SD <- function(x, subn) {
+        meanx <- mean(x, na.rm = TRUE)
+        devx <- x - meanx
+        ssd <- sqrt(sum(devx * devx, na.rm = TRUE)/(length(x[!is.na(x)]) - subn))
+        return(ssd)
+    }
+    subn <- sd.method != "sample"
+    sd.r <- SD(ref, subn)
+    sd.f <- SD(model, subn)
+    if (normalize) {
+        sd.f <- sd.f/sd.r
+        sd.r <- 1
+    }
+    maxsd <- 1.5 * max(sd.f, sd.r)
+    oldpar <- par("mar", "xpd", "xaxs", "yaxs")
+    if (!add) {
+        if (pos.cor) {
+            if (nchar(ylab) == 0)
+                ylab = "Standard deviation"
+            par(mar = mar)
+            plot(0, xlim = c(0, maxsd), ylim = c(0, maxsd), xaxs = "i",
+                yaxs = "i", axes = FALSE, main = main, xlab = xlab,
+                ylab = ylab, type = "n", cex = cex.axis, ...)
+            if (grad.corr.lines[1]) {
+                for (gcl in grad.corr.lines) lines(c(0, maxsd * gcl), c(0, maxsd * sqrt(1 - gcl^2)), lty = 3, col="blue")
+            }
+            segments(c(0, 0), c(0, 0), c(0, maxsd), c(maxsd, 0), col="blue")
+            axis.ticks <- pretty(c(0, maxsd), n=6)
+            axis.ticks <- axis.ticks[axis.ticks <= maxsd]
+            axis(1, at = axis.ticks, cex.axis = cex.axis)
+            axis(2, at = axis.ticks, cex.axis = cex.axis)
+            if (sd.arcs[1]) {
+                if (length(sd.arcs) == 1)
+                    sd.arcs <- axis.ticks
+                for (sdarc in sd.arcs) {
+                    xcurve <- cos(seq(0, pi/2, by = 0.03)) * sdarc
+                    ycurve <- sin(seq(0, pi/2, by = 0.03)) * sdarc
+                    lines(xcurve, ycurve, col = "black", lty = 3)
+                }
+            } # if there is more than one curve for the st.dev:
+            if (show.gamma[1]) {
+                if (length(show.gamma) > 1)
+                    gamma <- show.gamma
+                else gamma <- pretty(c(0, maxsd), n = ngamma)[-1] # [-1] removes the first value (0.0)
+                if (gamma[length(gamma)] > maxsd)
+                    gamma <- gamma[-length(gamma)]
+                labelpos <- seq(45, 70, length.out = length(gamma))
+                for (gindex in 1:length(gamma)) {
+                    xcurve <- cos(seq(0, pi, by = 0.03)) * gamma[gindex] + sd.r
+                    endcurve <- which(xcurve < 0)
+                    endcurve <- ifelse(length(endcurve), min(endcurve) - 1, 105)
+                    ycurve <- sin(seq(0, pi, by = 0.03)) * gamma[gindex]
+                    maxcurve <- xcurve * xcurve + ycurve * ycurve
+                    startcurve <- which(maxcurve > maxsd * maxsd)
+                    startcurve <- ifelse(length(startcurve), max(startcurve) + 1, 0)
+                    lines(xcurve[startcurve:endcurve], ycurve[startcurve:endcurve], col = gamma.col)
+                    if (xcurve[labelpos[gindex]] > 0)
+                        boxed.labels(xcurve[labelpos[gindex]], ycurve[labelpos[gindex]],
+                            gamma[gindex], border = FALSE, cex=1, col=gamma.col)
+                }
+            }
+            xcurve <- cos(seq(0, pi/2, by = 0.01)) * maxsd
+            ycurve <- sin(seq(0, pi/2, by = 0.01)) * maxsd
+            lines(xcurve, ycurve) # external semicircle
+            bigtickangles <- acos(seq(0.1, 0.9, by = 0.1))
+            medtickangles <- acos(seq(0.05, 0.95, by = 0.1))
+            smltickangles <- acos(seq(0.91, 0.99, by = 0.01))
+            segments(cos(bigtickangles) * maxsd, sin(bigtickangles) * maxsd,
+                cos(bigtickangles) * 0.97 * maxsd, sin(bigtickangles) * 0.97 * maxsd) # external major ticks
+            par(xpd = TRUE)
+            if (ref.sd) {
+                xcurve <- cos(seq(0, pi/2, by = 0.01)) * sd.r
+                ycurve <- sin(seq(0, pi/2, by = 0.01)) * sd.r
+                lines(xcurve, ycurve)
+            }
+            points(sd.r, 0, cex = pcex)
c(seq(0.1, 0.9, by = 0.1), + 0.95, 0.99), col="blue") # correlation numbers + text(maxsd * 0.8, maxsd * 0.8, "Correlation", srt = 315, col="blue") + segments(cos(medtickangles) * maxsd, sin(medtickangles) * + maxsd, cos(medtickangles) * 0.98 * maxsd, sin(medtickangles) * + 0.98 * maxsd) + segments(cos(smltickangles) * maxsd, sin(smltickangles) * + maxsd, cos(smltickangles) * 0.99 * maxsd, sin(smltickangles) * + 0.99 * maxsd) + } + else { # case correlation is negative + x <- ref + y <- model + R <- cor(x, y, use = "pairwise.complete.obs") + E <- mean(x, na.rm = TRUE) - mean(y, na.rm = TRUE) + xprime <- x - mean(x, na.rm = TRUE) + yprime <- y - mean(y, na.rm = TRUE) + sumofsquares <- (xprime - yprime)^2 + Eprime <- sqrt(sum(sumofsquares)/length(complete.cases(x))) + E2 <- E^2 + Eprime^2 + if (add == FALSE) { + maxray <- 1.5 * max(sd.f, sd.r) + plot(c(-maxray, maxray), c(0, maxray), type = "n", + asp = 1, bty = "n", xaxt = "n", yaxt = "n", + xlab = xlab, ylab = ylab, main = main, cex = cex.axis) + discrete <- seq(180, 0, by = -1) + listepoints <- NULL + for (i in discrete) { + listepoints <- cbind(listepoints, maxray * + cos(i * pi/180), maxray * sin(i * pi/180)) + } + listepoints <- matrix(listepoints, 2, length(listepoints)/2) + listepoints <- t(listepoints) + lines(listepoints[, 1], listepoints[, 2]) + lines(c(-maxray, maxray), c(0, 0)) + lines(c(0, 0), c(0, maxray)) + for (i in grad.corr.lines) { + lines(c(0, maxray * i), c(0, maxray * sqrt(1 - + i^2)), lty = 3, col="blue") + lines(c(0, -maxray * i), c(0, maxray * sqrt(1 - + i^2)), lty = 3, col="blue") + } + for (i in grad.corr.full) { + text(1.05 * maxray * i, 1.05 * maxray * sqrt(1 - + i^2), i, cex = 0.6) + text(-1.05 * maxray * i, 1.05 * maxray * sqrt(1 - + i^2), -i, cex = 0.6) + } + seq.sd <- seq.int(0, 2 * maxray, by = (maxray/10))[-1] + for (i in seq.sd) { + xcircle <- sd.r + (cos(discrete * pi/180) * + i) + ycircle <- sin(discrete * pi/180) * i + for (j in 1:length(xcircle)) { + if ((xcircle[j]^2 + ycircle[j]^2) < (maxray^2)) { + points(xcircle[j], ycircle[j], col = "darkgreen", + pch = ".") + if (j == 10) + text(xcircle[j], ycircle[j], signif(i, + 2), cex = 0.5, col = "darkgreen") + } + } + } + seq.sd <- seq.int(0, maxray, length.out = 5) + for (i in seq.sd) { + xcircle <- (cos(discrete * pi/180) * i) + ycircle <- sin(discrete * pi/180) * i + if (i) + lines(xcircle, ycircle, lty = 3, col = "blue") + text(min(xcircle), -0.03 * maxray, signif(i, + 2), cex = 0.5, col = "blue") + text(max(xcircle), -0.03 * maxray, signif(i, + 2), cex = 0.5, col = "blue") + } + text(0, -0.08 * maxray, "Standard Deviation", + cex = 0.7, col = "blue") + text(0, -0.12 * maxray, "Centered RMS Difference", + cex = 0.7, col = "darkgreen") + points(sd.r, 0, pch = 22, bg = "darkgreen", cex = 1.1) + text(0, 1.1 * maxray, "Correlation Coefficient", + cex = 0.7) + } + S <- (2 * (1 + R))/(sd.f + (1/sd.f))^2 + } + } + + if(BIAS==TRUE){ + points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = my.col, cex = pcex) + } else { + points(sd.f * R, sd.f * sin(acos(R)), pch = pch, col = col, cex = pcex) + } + + # Label line; You can change the pos argument to your liking: + if(length(text)>0) text(sd.f * R, sd.f * sin(acos(R)), labels=my.text, cex = text.cex, pos=3) + text(0.9, 0.2, "RMSE", srt = 25, cex=1, col=gamma.col) + + text(1, 0.04, "ERA-Interim", srt = 0, cex=1, col="darkgray") + + invisible(oldpar) +} + diff --git a/old/grid2contour_Raul.R b/old/grid2contour_Raul.R new file mode 100644 index 
index 0000000000000000000000000000000000000000..a92ce82a7ab533d2f5b255d8f66c3df31ebe8663
--- /dev/null
+++ b/old/grid2contour_Raul.R
@@ -0,0 +1,47 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: Remove the grid points above a certain value (argument 'level') [and below '-level' if two.sides=TRUE] that happen to lie in areas with few points above that value.
+# Useful to remove from a contour plot all the small spots of significant points that we don't want to contour.
+# To do so, just apply this function inside the option 'contours' of 'PlotEquiMap' to mask the significant points (they are overwritten with a constant value).
+# Argument 'size' determines the side of the square (in grid points) used to check whether there are enough grid points with values above 'level' near
+# the chosen point or not. Increasing it will increase the number of grid points deleted, leaving only the bigger spots of points above the chosen value.
+# I/O: a 2D lat/lon grid in geographic coordinates
+# Assumptions: none
+# Branch: general
+#
+# Example:
+# data <- matrix(runif(48000,0,1)^2,300,160) + matrix(c(rep(0,20000),rep(0.6,3000),rep(0,25000)),300,160)
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"))
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=data, brks2=0.6)
+# PlotEquiMap(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE)
+# PlotEquiMap_colored(data,lon=1:300,lat=-80:79,filled.continents=F,cols=c("white","yellow","orange","red","darkred"), contours=grid2contour(data,0.6,FALSE,5), brks2=0.6, contours.labels=FALSE, contours.col="blue", continents.col="gray40")
+
+grid2contour <- function(grid, level, two.sides=FALSE, size=10){
+  nrows <- dim(grid)[1]
+  ncols <- dim(grid)[2]
+  radius <- round(size/2)
+
+  grid.weighted <- matrix(NA, nrows, ncols)
+
+  # expand the grid in all directions so the moving square never falls outside it:
+  grid.expanded <- rbind(cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]),cbind(grid,grid,grid),cbind(grid[nrows:1,((ncols/2)+1):ncols],grid[nrows:1,],grid[nrows:1,],grid[nrows:1,1:(ncols/2)]))
+
+  if(two.sides==FALSE){
+    for(i in 1:nrows){
+      for(j in 1:ncols){
+        grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] < level)
+      }
+    }
+  } else {
+    for(i in 1:nrows){
+      for(j in 1:ncols){
+        grid.weighted[i,j] <- sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] < level) + sum(grid.expanded[(nrows+i-radius):(nrows+i+radius),(ncols+j-radius):(ncols+j+radius)] > -level)
+      }
+    }
+  }
+
+  n.points.min <- (2*radius+1)^2*0.3 # 30% of the total points in the square
+  ss <- which(grid.weighted < n.points.min)
+  grid[ss] <- 1
+  return(grid)
+}
\ No newline at end of file
diff --git a/old/taylor_regimes_observados_mesuales b/old/taylor_regimes_observados_mesuales
new file mode 100644
index 0000000000000000000000000000000000000000..21b083343399684aa75cc6c5513d5020eb34035d
--- /dev/null
+++ b/old/taylor_regimes_observados_mesuales
@@ -0,0 +1,105 @@
+library("s2dverification")
+library("plotrix")
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+# (note: my.taylor() from taylor.R must also be loaded before running the calls below)
+
+load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_sfcWind_psl.RData")
+w1 <- pslwr1mean
+w2 <- pslwr2mean
+w3 <- pslwr3mean
+w4 <- pslwr4mean
+
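+# (Each month below follows the same load-and-rename pattern shown next
+#  for November; a hedged sketch of the generic form, assuming the other
+#  months were processed the same way beforehand, with m = 1..12 and
+#  'dir.regimes' a hypothetical variable holding the directory used below:
+#
+#    load(paste0(dir.regimes, "ERA-Interim_", month.name[m], "_psl.RData"))
+#    for (k in 1:4) assign(paste0("p", k, "mean", m), get(paste0("pslwr", k, "mean")))
+#  )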
+load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter/ERA-Interim_November_psl.RData") +load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter/ERA-Interim_November_ClusterNames.RData") +cluster1.name11 <- cluster1.name +cluster2.name11 <- cluster2.name +cluster3.name11 <- cluster3.name +cluster4.name11 <- cluster4.name +p1mean11 <- pslwr1mean +p2mean11 <- pslwr2mean +p3mean11 <- pslwr3mean +p4mean11 <- pslwr3mean + + +c(cluster1.name1, cluster2.name1, cluster3.name1, cluster4.name1) +c(cluster1.name2, cluster2.name2, cluster3.name2, cluster4.name2) +c(cluster1.name3, cluster2.name3, cluster3.name3, cluster4.name3) +c(cluster1.name4, cluster2.name4, cluster3.name4, cluster4.name4) +c(cluster1.name5, cluster2.name5, cluster3.name5, cluster4.name5) +c(cluster1.name6, cluster2.name6, cluster3.name6, cluster4.name6) +c(cluster1.name7, cluster2.name7, cluster3.name7, cluster4.name7) +c(cluster1.name8, cluster2.name8, cluster3.name8, cluster4.name8) +c(cluster1.name9, cluster2.name9, cluster3.name9, cluster4.name9) +c(cluster1.name10, cluster2.name10, cluster3.name10, cluster4.name10) +c(cluster1.name11, cluster2.name11, cluster3.name11, cluster4.name11) +c(cluster1.name12, cluster2.name12, cluster3.name12, cluster4.name12) + +# NAO+ +my.taylor(as.vector(w3),as.vector(p2mean1), normalize=TRUE, my.text="Jan", ngamma=6) +#my.taylor(as.vector(w3),as.vector(p1mean2), normalize=TRUE, add=TRUE, my.text="Feb") +my.taylor(as.vector(w2),as.vector(p1mean3), normalize=TRUE, add=TRUE, my.text="Feb") +my.taylor(as.vector(w3),as.vector(p2mean3), normalize=TRUE, add=TRUE, my.text="Mar") +my.taylor(as.vector(w3),as.vector(p3mean4), normalize=TRUE, add=TRUE, my.text="Apr") +my.taylor(as.vector(w3),as.vector(p1mean5), normalize=TRUE, add=TRUE, my.text="May") +my.taylor(as.vector(w3),as.vector(p3mean6), normalize=TRUE, add=TRUE, my.text="Jun") +my.taylor(as.vector(w3),as.vector(p4mean7), normalize=TRUE, add=TRUE, my.text="Jul") +#my.taylor(as.vector(w3),as.vector(p2mean8), normalize=TRUE, add=TRUE, my.text="Aug") +my.taylor(as.vector(w3),as.vector(p2mean9), normalize=TRUE, add=TRUE, my.text="Sep") +my.taylor(as.vector(w3),as.vector(p2mean10), normalize=TRUE, add=TRUE, my.text="Oct") +my.taylor(as.vector(w3),as.vector(p1mean11), normalize=TRUE, add=TRUE, my.text="Nov") +my.taylor(as.vector(w3),as.vector(p2mean12), normalize=TRUE, add=TRUE, my.text="Dec") +my.taylor(as.vector(w1),as.vector(p3mean5), normalize=TRUE, add=TRUE, my.text="Aug") + +# NAO- +my.taylor(as.vector(w4),as.vector(p1mean1), normalize=TRUE, my.text="Mar", ngamma=6) +my.taylor(as.vector(w4),as.vector(p1mean2), normalize=TRUE, add=TRUE, my.text="Feb") +my.taylor(as.vector(w4),as.vector(p4mean3), normalize=TRUE, add=TRUE, my.text="Jan") +my.taylor(as.vector(w4),as.vector(p2mean4), normalize=TRUE, add=TRUE, my.text="Apr") +my.taylor(as.vector(w4),as.vector(p2mean5), normalize=TRUE, add=TRUE, my.text="May") +my.taylor(as.vector(w4),as.vector(p1mean6), normalize=TRUE, add=TRUE, my.text="Jun") +my.taylor(as.vector(w4),as.vector(p2mean7), normalize=TRUE, add=TRUE, my.text="Jul") +my.taylor(as.vector(w4),as.vector(p3mean8), normalize=TRUE, add=TRUE, my.text="Aug") +#my.taylor(as.vector(w4),as.vector(p3mean9), normalize=TRUE, add=TRUE, my.text="Sep") +my.taylor(as.vector(w3),as.vector(p3mean6), normalize=TRUE, add=TRUE, my.text="Sep") +#my.taylor(as.vector(w4),as.vector(p1mean10), normalize=TRUE, add=TRUE, my.text="Dec") +my.taylor(as.vector(w4),as.vector(p2mean11), 
normalize=TRUE, add=TRUE, my.text="Nov") +my.taylor(as.vector(w4),as.vector(p4mean12), normalize=TRUE, add=TRUE, my.text="Oct") +#my.taylor(as.vector(w3),as.vector(p2mean10), normalize=TRUE, add=TRUE, my.text="Dec") +my.taylor(as.vector(w2),as.vector(p1mean3), normalize=TRUE, add=TRUE, my.text="Dec") + +# Blocking +#my.taylor(as.vector(w2),as.vector(p4mean1), normalize=TRUE, my.text="Jan", ngamma=6) +my.taylor(as.vector(w2),as.vector(p2mean2), normalize=TRUE, ngamma=6, my.text="Feb") +my.taylor(as.vector(w2),as.vector(p1mean3), normalize=TRUE, add=TRUE, my.text="Mar") +my.taylor(as.vector(w2),as.vector(p4mean4), normalize=TRUE, add=TRUE, my.text="Apr") +#my.taylor(as.vector(w2),as.vector(p4mean5), normalize=TRUE, add=TRUE, my.text="May") +my.taylor(as.vector(w2),as.vector(p2mean6), normalize=TRUE, add=TRUE, my.text="Jun") +my.taylor(as.vector(w2),as.vector(p1mean7), normalize=TRUE, add=TRUE, my.text="Jul") +my.taylor(as.vector(w2),as.vector(p4mean8), normalize=TRUE, add=TRUE, my.text="Aug") +#my.taylor(as.vector(w2),as.vector(p4mean9), normalize=TRUE, add=TRUE, my.text="Sep") +my.taylor(as.vector(w2),as.vector(p4mean10), normalize=TRUE, add=TRUE, my.text="Nov") +my.taylor(as.vector(w2),as.vector(p4mean11), normalize=TRUE, add=TRUE, my.text="Nov") +my.taylor(as.vector(w2),as.vector(p1mean12), normalize=TRUE, add=TRUE, my.text="Dec") +my.taylor(as.vector(w4),as.vector(p2mean11), normalize=TRUE, add=TRUE, my.text="Jan") +#my.taylor(as.vector(w4),as.vector(p2mean4), normalize=TRUE, add=TRUE, my.text=3) +my.taylor(as.vector(w4),as.vector(p2mean5), normalize=TRUE, add=TRUE, my.text="Sep") +my.taylor(as.vector(w3),as.vector(p4mean7), normalize=TRUE, add=TRUE, my.text="May") +#my.taylor(as.vector(w4),as.vector(p1mean1), normalize=TRUE, add=TRUE, my.text="Dec") +my.taylor(as.vector(w1),as.vector(p1mean8), normalize=TRUE, add=TRUE, my.text="Oct") + +# Atl: +my.taylor(as.vector(w1),as.vector(p3mean1), normalize=TRUE, my.text="Jan", ngamma=6) +my.taylor(as.vector(w1),as.vector(p3mean2), normalize=TRUE, add=TRUE, my.text="Feb") +my.taylor(as.vector(w1),as.vector(p3mean3), normalize=TRUE, add=TRUE, my.text="Mar") +my.taylor(as.vector(w1),as.vector(p1mean4), normalize=TRUE, add=TRUE, my.text="Apr") +my.taylor(as.vector(w1),as.vector(p3mean5), normalize=TRUE, add=TRUE, my.text="May") +my.taylor(as.vector(w1),as.vector(p4mean6), normalize=TRUE, add=TRUE, my.text="Jun") +my.taylor(as.vector(w1),as.vector(p3mean7), normalize=TRUE, add=TRUE, my.text="Jul") +my.taylor(as.vector(w1),as.vector(p1mean8), normalize=TRUE, add=TRUE, my.text="Aug") +#my.taylor(as.vector(w1),as.vector(p1mean9), normalize=TRUE, add=TRUE, my.text="Sep") +my.taylor(as.vector(w1),as.vector(p3mean10), normalize=TRUE, add=TRUE, my.text="Oct") +my.taylor(as.vector(w1),as.vector(p3mean11), normalize=TRUE, add=TRUE, my.text="Nov") +my.taylor(as.vector(w1),as.vector(p3mean12), normalize=TRUE, add=TRUE, my.text="Dec") +my.taylor(as.vector(w4),as.vector(p2mean11), normalize=TRUE, add=TRUE, my.text="Sep") + + + diff --git a/old/weather_regimes_EDPR.R b/old/weather_regimes_EDPR.R new file mode 100644 index 0000000000000000000000000000000000000000..412c6315fcc3b801a3b79d80d74cb79ac192d481 --- /dev/null +++ b/old/weather_regimes_EDPR.R @@ -0,0 +1,1349 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system +# +# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder. 
+# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the functions Load() and InsertDim()
+library(abind) # for function abind()
+library(Kendall) # for the MannKendall test
+library(reshape2) # for functions acast() and melt(). WARNING: after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken!
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+
+rean <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.name <- "JRA-55"
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 # starting year of the MSLP daily data
+year.end <- 2017 # ending year of the MSLP daily data
+
+LOESS <- TRUE # whether to apply the LOESS filter to the daily climatology or not
+running.cluster <- FALSE # add to the clustering also the daily SLP data of the two months closest to the target month (only for monthly analysis)
+running.15 <- FALSE # add to the clustering also the daily SLP data of the 15 closest days of the two neighbouring months (only for monthly analysis). You cannot set both
+                    # this variable and 'running.cluster' above to TRUE
+lat.weighting <- TRUE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+subdaily <- FALSE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available)
+
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days assigned to the same regime.
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional arguments of the script]
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4"
+                   # (when composition='edpr' or monthly_anomalies = TRUE)
+
+#############################################################################################################################
+
+# in case the script is run with two external arguments, the first one replaces 'year.end' (the last year of data) and the second one 'period' (the month to classify):
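+#
+# For example (a usage sketch; adapt the file name to where you saved this script):
+#
+#   Rscript weather_regimes_EDPR.R 2017 1     # year.end = 2017, period = 1 (January)
+#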
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 1) year.start <- year.end <- as.integer(script.arg[1])
+if(length(script.arg) >= 2){
+  year.end <- script.arg[1]
+  period <- script.arg[2]
+}
+
+n.years <- year.end - year.start + 1
+year.end.clim <- year.end - 1 # last year used to compute the climatology (better set to the previous year, unless year.end has data available for all months)
+n.years.clim <- year.end.clim - year.start + 1
+
+if(running.cluster && running.15) stop("both 'running.cluster' and 'running.15' cannot be TRUE simultaneously")
+
+cat(paste0("Rean: ", rean.name, " Period: ", period, " Year: ", year.end, "\n"))
+
+if(no.regimes) { regime.title <- paste0("Cluster",1:4) } else { regime.title <- orden }
+
+## create working directory:
+workdir <- paste0("/shared/earth/EarthSystemServices/EDPR/", year.end, "_", my.month[period]) # working dir where output files will be generated
+dir.create(file.path(workdir))
+
+n.map <- 0
+
+cat("Loading lat/lon. Please wait...\n")
+
+## load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(list(path=rean)), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+#################### Start analysis ########################################################
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading SLP data. Please wait...\n")
+
+psleuFull <- array(NA,c(1, 1, n.years, 365, n.pos.lat, n.pos.lon))
+
+psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+
+## remove leap days (29 February) so all years have arrays of the same dimensions:
+cat("Removing leap days. Please wait...\n")
+## remove leap days (the 29th of February) to have all arrays with the same dimensions:
+cat("Removing leap days. Please wait...\n")
+psleuFull[,,,,,] <- psleuFull366$obs[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366$obs[,,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+}
+
+psleuFull366$obs <- NULL
+gc()
+
+if((running.cluster || running.15)) {
+  ## the months after the last month of the last year loaded are automatically filled with NA by Load(); however, the clustering algorithm cannot handle NA,
+  ## so we replace them with some values (here, the mean of the previous years) in order to affect the analysis as little as possible
+  ## while still being able to compute the running clustering.
+  ## WARNING: in case your last month is December, the running clustering will use the January data of the last year loaded, not of the next year:
+  ## psleuFull[,,n.years,305:334,,] <- psleuFull[,,n.years-1,305:334,,]
+
+  month.NA <- psleuFull366$not_found_files[1] # filename of the first month with NA (string value with the full file path)
+  first.month.NA <- as.integer(substr(month.NA,nchar(month.NA)-4,nchar(month.NA)-3)) # first month with NA
+  first.day.NA <- pos.period(1,first.month.NA)[1] # first day with NA (counting from the start of the year)
+
+  psleuFullnolast <- psleuFull[,,1:(n.years-1),first.day.NA:365,,]
+  psleuFullMean <- apply(psleuFullnolast, c(2,3,4), mean, na.rm=TRUE) # mean of the previous years
+
+  psleuFull[,,n.years,first.day.NA:365,,] <- psleuFullMean
+
+  rm(psleuFullnolast, psleuFullMean)
+
+}
+
+## convert psl to daily anomalies with the LOESS filter:
+cat("Calculating anomalies. Please wait...\n")
+psleuFull_no_last_year <- psleuFull[1,1,1:n.years.clim,,,,drop=FALSE]
+pslPeriodClim <- apply(psleuFull_no_last_year, c(1,2,4,5,6), mean, na.rm=T)
+rm(psleuFull_no_last_year)
+gc()
+
+if(LOESS == TRUE){
+  pslPeriodClimLoess <- array(NA, dim(pslPeriodClim))
+
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+} else { # keep the raw daily climatology to compute the anomalies:
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+  rm(pslPeriodClim)
+  gc()
+}
+
+psleuFull <- psleuFull - pslPeriodClim2
+rm(pslPeriodClim2)
+gc()
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+for(p in period){
+  ## Select only the data of the month/season we want:
+
+  ## pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g. winter)
+  if(running.cluster) {
+    if(subdaily){
+      my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4))
+      if(p == 1) my.hours <- c(1337:1460,1:236)
+      if(p == 12) my.hours <- c(1217:1460,1:124)
+
+      pslPeriod <- psleuFull[,my.hours,,] # select all days in the period of 3 months centered on the target month p
+
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all days in the period of 3 months centered on the target month p
+    }
+
+  }
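+
+  # Illustrative sketch (not executed) of the latitude weighting applied a few lines
+  # below: anomalies are multiplied by sqrt(cos(lat)), so high-latitude grid points,
+  # which cover a smaller area, weigh less in the clustering. Toy values:
+  if(FALSE){
+    toy.lat <- c(30, 50, 70)
+    cos(toy.lat*pi/180)^0.5   # ~0.93 0.80 0.58: the weights decrease poleward
+  }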
+  if(running.15) pslPeriod <- psleuFull[1,1,,pos.month.extended15(1,p),,]
+
+  ## if there isn't any kind of running clustering:
+  if(!running.cluster && !running.15) {
+    if(subdaily){
+      my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4))
+      pslPeriod <- psleuFull[,my.hours,,]
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only days in the chosen period (e.g. winter)
+    }
+  }
+
+  ## weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    if(!running.cluster && !running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.period(2001,p))), 1, n.years)
+    if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+    if(running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended15(2001,p))), 1, n.years)
+
+    pslPeriod <- pslPeriod * lat.weighted.array
+
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  gc()
+  cat("Preformatting data for clustering. Please wait...\n")
+  psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+  gc()
+
+  cat("Preformatting data for clustering. Please wait......\n")
+
+  ## this function eats a lot of memory!!!
+  psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+  gc()
+
+  # compute the clustering:
+  cat("Clustering data. Please wait.........\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 grid point out of 16
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA <- princomp(pslcut,cor=FALSE)
+    tot.variance <- head(cumsum(my.PCA$sdev^2 / sum(my.PCA$sdev^2)),50) # check how many PCs to keep, based on the cumulative sum of their explained variance
+    n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep only the first PCs that together explain at least the chosen fraction of variance (typically 80%)
+    my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+
+  } else { # 4 is the number of clusters:
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+    gc()
+
+  }
+
+  rm(psl.kmeans)
+  gc()
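+
+  # Illustrative sketch (not executed): the clustering above operates on a
+  # days-by-gridpoints matrix (one row per day). A self-contained toy version of the
+  # same kmeans() call, on made-up data:
+  if(FALSE){
+    toy.days <- matrix(rnorm(100*12), nrow=100)  # 100 days, 12 grid points
+    toy.cluster <- kmeans(toy.days, centers=4, iter.max=100, nstart=30)
+    table(toy.cluster$cluster)                   # how many days fall in each regime
+  }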
+  cat("Extracting monthly regimes. Please wait............\n")
+
+  if(running.cluster && p < 13){
+    ## select only the days inside the 3-month cluster series that belong to the target month p:
+    n.days.period <- length(pos.month.extended(1,p)) # total number of days in the three months
+
+    days.month <- days.month.new <- days.month.full <- c()
+    days.month <- which(pos.month.extended(1,p) == pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p))
+
+    for(y in 1:n.years){
+      days.month.new <- days.month + n.days.period*(y-1)
+      days.month.full <- c(days.month.full, days.month.new)
+      ## print(days.month.new)
+      ## print(days.month)
+    }
+
+    ## in this case, we are not selecting days but 6-hourly intervals (the four steps of day d are d*4-3 to d*4):
+    if(subdaily) days.month.full <- sort(c(days.month.full*4-3, days.month.full*4-2, days.month.full*4-1, days.month.full*4))
+
+    ## select only the days of the cluster series inside the target month p:
+    cluster.sequence <- my.cluster$cluster[days.month.full]
+
+  }
+
+  if(running.15 && p < 13){
+    ## select only the days inside the running-window cluster series that belong to the target month p:
+    n.days.period <- length(pos.month.extended15(1,p)) # total number of days in the running window
+
+    days.month <- days.month.new <- days.month.full <- c()
+    days.month <- which(pos.month.extended15(1,p) == pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p))
+
+    for(y in 1:n.years){
+      days.month.new <- days.month + n.days.period*(y-1)
+      days.month.full <- c(days.month.full, days.month.new)
+      ##print(days.month.new)
+      ##print(days.month)
+    }
+
+    ## select only the days of the cluster series inside the target month p:
+    cluster.sequence <- my.cluster$cluster[days.month.full]
+
+  }
+
+  if(!running.cluster && !running.15) cluster.sequence <- my.cluster$cluster
+
+  ## convert the cluster sequence from 6-hourly to daily:
+  if(subdaily){
+    type <- c()
+
+    for(day in 1:(length(cluster.sequence)/4)){
+      #day <- ceiling(hour/4)
+      hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)]
+
+      t1 <- length(which(hourly == 1))
+      t2 <- length(which(hourly == 2))
+      t3 <- length(which(hourly == 3))
+      t4 <- length(which(hourly == 4))
+      tt <- c(t1,t2,t3,t4)
+
+      ## if all 4 time steps belong to the same regime, assign the day to it:
+      if(length(unique(hourly)) == 1) type[day] <- hourly[1]
+
+      ## if there are two different regimes, check if one has a higher frequency:
+      if(length(unique(hourly)) == 2){
+        if(any(tt == 3)){ # if 3 of the 4 time steps belong to the same weather regime, assign the day to it
+          type[day] <- which(tt == 3)
+        } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrarily assign the day to the regime occurring at 12:00
+          type[day] <- hourly[3]
+        }
+      }
+
+      ## if there are three different regimes, assign the day to the only regime with 2 time steps:
+      if(length(unique(hourly)) == 3) type[day] <- which(tt == 2)
+
+      ## if there are four different regimes (a very rare event!), assign the day to the regime occurring at 12:00:
+      if(length(unique(hourly)) == 4) type[day] <- hourly[3]
+
+    } # close for on day
+
+  } # close if on subdaily
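+
+  # Illustrative sketch (not executed): the 6-hourly-to-daily conversion above is a
+  # majority vote over the four time steps of each day, with ties broken by the
+  # 12:00 step. Toy version for one hypothetical day:
+  if(FALSE){
+    hourly <- c(2, 2, 3, 2)                        # regimes at 00, 06, 12 and 18 UTC
+    tt <- tabulate(hourly, nbins=4)                # occurrences of each regime
+    if(max(tt) >= 3) which.max(tt) else hourly[3]  # this day is assigned to regime 2
+  }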
+  if(sequences){ # it works only for reanalysis data!!!
+    ## for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    ## warning: the first 4 and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    ## sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster$cluster * sequ
+    rm(wrdiff, sequ)
+  } # close if on sequences
+
+  ## indices of the days assigned to each regime (or, if sequences=TRUE, of the days belonging to a sequence of at least 5 days of that regime):
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  ## yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+
+  mod.subdaily <- ifelse(subdaily,4,1)
+  np <- n.days.in.a.period(p,1)*mod.subdaily
+
+  for(y in year.start:year.end){
+    ## for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+    wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+    wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+    wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+    wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+  }
+
+  ## convert the counts to relative frequencies (0-1); they are multiplied by 100 only when plotted:
+  wr1y <- wr1y/np
+  wr2y <- wr2y/np
+  wr3y <- wr3y/np
+  wr4y <- wr4y/np
+
+  gc()
+
+  cat("Extracting monthly regimes structure. Please wait...............\n")
+
+  ## measure the regime anomalies:
+  pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+  if(running.cluster || running.15){
+    pslmat.new <- pslmat
+    pslmat <- pslmat.new[days.month.full,,]
+    rm(pslmat.new)
+    gc()
+  }
+
+  pslwr1 <- pslmat[wr1,,, drop=F]
+  pslwr2 <- pslmat[wr2,,, drop=F]
+  pslwr3 <- pslmat[wr3,,, drop=F]
+  pslwr4 <- pslmat[wr4,,, drop=F]
+
+  ## regime structure (centroid maps):
+  pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+  pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+  pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+  pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+  ## spatial mean for each day, saved for the computation of the FairRPSS:
+  pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+  pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+  pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+  pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
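+
+  # Illustrative sketch (not executed) of the yearly frequency computation above, on a
+  # hypothetical 2-year series with 5 days per period:
+  if(FALSE){
+    toy.seq <- c(1,1,2,4,3,  2,2,2,1,4)  # year 1 days 1-5, then year 2 days 1-5
+    toy.np <- 5
+    sapply(1:4, function(k) sum(toy.seq[1:toy.np] == k)/toy.np)  # year-1 frequencies: 0.4 0.2 0.2 0.2
+  }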
+  cat("Saving clusters. Please wait..................\n")
+
+  ## save all the data necessary to redraw the graphs once the right regime is known:
+  save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, p, workdir, rean.name, year.start, year.end, n.years, period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,wr1,wr2,wr3,wr4, LOESS, running.cluster, running.15, lat.weighting, file=paste0(workdir,"/",rean.name,"_",my.period[p],"_psl.RData"))
+
+  # cat("Finished!\n")
+} # close the for loop on 'p'
+
+rm(psleuFull, psl.melted, pslmat, pslPeriod, pslPeriodClimLoess)
+gc()
+
+################################################################################
+#                          Measure impact maps                                 #
+################################################################################
+cat("Measuring impact. Please wait.....................\n")
+
+# Load var data:
+info.period <- period[1] # period used to get the variables n.years, lat.min, lat.max, lon.min and lon.max
+period.old <- period
+
+vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs
+
+vareuFull <- array(NA,c(1,1,n.years,365, dim(vareuFull366)[5], dim(vareuFull366)[6]))
+
+## remove leap days to have all arrays with the same dimensions:
+cat("Removing leap days. Please wait...\n")
+vareuFull[,,,,,] <- vareuFull366[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) vareuFull[,,y2,60:365,,] <- vareuFull366[,,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+}
+
+rm(vareuFull366)
+gc()
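+
+# Illustrative sketch (not executed) of the LOESS smoothing applied to the daily
+# climatologies in this script, on a made-up noisy annual cycle:
+if(FALSE){
+  toy.clim <- 10 + 5*sin(2*pi*(1:365)/365) + rnorm(365)  # noisy seasonal cycle
+  toy.loess <- loess(clim ~ day, data.frame(clim=toy.clim, day=1:365), span=0.35)
+  toy.smooth <- predict(toy.loess)                       # smoothed daily climatology
+}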
+## convert var to daily anomalies with the LOESS filter:
+cat("Calculating anomalies. Please wait...\n")
+varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+varPeriodClimLoess <- varPeriodClim
+for(i in 1:n.pos.lat){
+  for(j in 1:n.pos.lon){
+    my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365)
+    my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+    varPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+  }
+}
+
+rm(varPeriodClim)
+gc()
+
+varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years)
+
+vareuFull <- vareuFull - varPeriodClim2
+##rm(varPeriodClim2)
+gc()
+
+for(p in period){
+  ##p=1 # for the debug
+
+  ## load the regime data for the chosen month:
+  varPeriod <- vareuFull[1,1,,pos.period(1,p),,] # select only var data during the chosen period
+  varPeriodClim <- varPeriodClim2[1,1,,pos.period(1,p),,]
+  varPeriodRel <- varPeriod / varPeriodClim
+
+  load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_psl.RData"))
+
+  cluster.sequence <- my.cluster$cluster # old syntax: my.cluster[[p]]$cluster
+
+  seasonal.data <- ifelse(length(my.cluster$cluster)/n.years > 33, TRUE, FALSE) # if there are more than 33 days per year, we are loading sequences of 3 months
+  if(seasonal.data == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+    n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+    days.month <- days.month.new <- days.month.full <- c()
+    days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+    for(y in 1:n.years){
+      days.month.new <- days.month + n.days.period*(y-1)
+      days.month.full <- c(days.month.full, days.month.new)
+    }
+
+    cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+  }
+
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  gc()
+
+  cat("Formatting var data. Please wait...\n")
+  var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+  var.meltedRel <- melt(varPeriodRel[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+  gc()
+
+  cat("Formatting var data. Please wait......\n")
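+
+  # Illustrative sketch (not executed): the p-values computed below compare, at each
+  # grid point, the var anomalies of all the days of the period with those of the days
+  # assigned to one regime. Toy version of the same t-test, with made-up samples:
+  if(FALSE){
+    toy.all <- rnorm(300)              # all days at one grid point
+    toy.wr  <- rnorm(70, mean=0.5)     # days of one regime at the same grid point
+    t.test(toy.all, toy.wr)$p.value    # small p-value -> significant regime impact
+  }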
+  varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon))
+  varmatRel <- unname(acast(var.meltedRel, Year + Day ~ Lat ~ Lon))
+
+  varwr1 <- varmat[wr1,,,drop=F]
+  varwr2 <- varmat[wr2,,,drop=F]
+  varwr3 <- varmat[wr3,,,drop=F]
+  varwr4 <- varmat[wr4,,,drop=F]
+
+  varwr1Rel <- varmatRel[wr1,,,drop=F]
+  varwr2Rel <- varmatRel[wr2,,,drop=F]
+  varwr3Rel <- varmatRel[wr3,,,drop=F]
+  varwr4Rel <- varmatRel[wr4,,,drop=F]
+
+  varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T)
+  varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T)
+  varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T)
+  varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T)
+
+  varwr1meanRel <- apply(varwr1Rel,c(2,3),mean,na.rm=T)
+  varwr2meanRel <- apply(varwr2Rel,c(2,3),mean,na.rm=T)
+  varwr3meanRel <- apply(varwr3Rel,c(2,3),mean,na.rm=T)
+  varwr4meanRel <- apply(varwr4Rel,c(2,3),mean,na.rm=T)
+
+  n.datos <- n.years * n.days.in.a.period(p,2001)
+
+  varwrBoth1 <- abind(varmat, varwr1, along = 1)
+  pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth1, varwr1)
+  gc()
+
+  varwrBoth2 <- abind(varmat, varwr2, along = 1)
+  pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth2, varwr2)
+  gc()
+
+  varwrBoth3 <- abind(varmat, varwr3, along = 1)
+  pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth3, varwr3)
+  gc()
+
+  varwrBoth4 <- abind(varmat, varwr4, along = 1)
+  pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth4, varwr4)
+
+  rm(varmat, varmatRel)
+  gc()
+
+  ## save all the data necessary to redraw the graphs once the right regime is known:
+  save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, varwr1meanRel, varwr2meanRel, varwr3meanRel, varwr4meanRel, file=paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+  gc()
+
+} # close p on period
+
+rm(vareuFull, varPeriodClim2, var.melted, var.meltedRel, varPeriod, varPeriodClim, varPeriodRel, varPeriodClimLoess)
+gc()
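+
+# Illustrative sketch (not executed): the max1..max4 construction in the section below
+# ranks the four clusters by their within-cluster sum of squares, largest first. A
+# compact equivalent on toy values:
+if(FALSE){
+  toy.withinss <- c(12.3, 45.1, 7.8, 30.2)
+  order(toy.withinss, decreasing=TRUE)  # 2 4 1 3, i.e. c(max1, max2, max3, max4)
+}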
+#######################################################################################################
+#                        Order clusters by explained variance                                         #
+#######################################################################################################
+
+### order the clusters by variance (as in composition == "variance") and save them with the '_ClusterNames' suffix:
+cat("Ordering clusters by variance. Please wait........................\n")
+
+ordering <- TRUE
+save.names <- TRUE
+as.pdf <- FALSE
+
+for(p in period){
+  ##p <- period[1] # for the debug
+  p.orig <- p
+
+  print(paste("p =",p))
+
+  ## load the regime data
+  impact.data <- FALSE
+
+  load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+
+  ## load the impact data only if it is available; if not, it will leave an empty space in the composition:
+  if(file.exists(paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+    impact.data <- TRUE
+    print(paste0("Impact data for variable ",var.name[var.num]," available for reanalysis ", rean.name))
+
+    load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+
+  } else {
+    impact.data <- FALSE
+    print(paste0("Impact data for variable ",var.name[var.num]," not available for reanalysis ", rean.name))
+  }
+
+  my.cluster2 <- my.cluster # create a copy of my.cluster
+
+  ss1 <- which(my.cluster$cluster == 1)
+  ss2 <- which(my.cluster$cluster == 2)
+  ss3 <- which(my.cluster$cluster == 3)
+  ss4 <- which(my.cluster$cluster == 4)
+
+  withinss <- my.cluster$withinss
+  max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # cluster with the maximum variance
+  withinss[max1] <- NA
+
+  max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # cluster with the second maximum variance
+  withinss[max2] <- NA
+
+  max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # cluster with the third maximum variance
+  withinss[max3] <- NA
+
+  max4 <- which(!is.na(withinss))
+  rm(withinss)
+
+  ## vector whose first element tells which cluster has the maximum variance, whose second element tells
+  ## which cluster has the second maximum variance, and so on:
+  max.seq <- c(max1, max2, max3, max4)
+
+  assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+)
+  assign(paste0("cluster",max2,".name"), orden[2])
+  assign(paste0("cluster",max3,".name"), orden[3])
+  assign(paste0("cluster",max4,".name"), orden[4])
+
+  save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+}
+
+#########################################################################################
+#                                   Create maps                                         #
+#########################################################################################
+
+## Compute the climatology and anomalies (with the LOESS filter) for sfcWind data, and draw the monthly anomalies, if you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis:
+
+load(paste0(workdir,"/",rean.name,"_",my.period[period],"_psl.RData")) # only to load year.start and year.end
+
+sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs
+
+sfcWind <- sfcWind366[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+}
+
+rm(sfcWind366)
+gc()
+
+## LOESS anomalies:
+sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+sfcWindClimLoess <- sfcWindClimDaily
+for(i in 1:n.pos.lat){
+  for(j in 1:n.pos.lon){
+    my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+    my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+    sfcWindClimLoess[,i,j] <- predict(my.loess)
+  }
+}
+
+rm(sfcWindClimDaily)
+gc()
+
+sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+sfcWindAnom <- sfcWind - sfcWindClim2
+sfcWindAnomRel <- sfcWindAnom / sfcWindClim2
+
+rm(sfcWindClimLoess)
+rm(sfcWind)
+gc()
+
+## same as above but computing the climatology and anomalies (with the LOESS filter) for psl data, to draw the monthly SLP anomalies:
+slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs
+
+slp <- slp366[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+}
+
+rm(slp366)
+gc()
+
+slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T)
+
+slpClimLoess <- slpClimDaily
+for(i in 1:n.pos.lat){
+  for(j in 1:n.pos.lon){
+    my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365)
+    my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+    slpClimLoess[,i,j] <- predict(my.loess)
+  }
+}
+
+rm(slpClimDaily)
+
+gc()
+
+slpClim2 <- InsertDim(slpClimLoess, 1, n.years)
+slpAnom <- slp - slpClim2
+
+rm(slpClimLoess)
+rm(slp)
+gc()
+
+if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa
+
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+## color scale for the impact maps:
+## my.brks.var <- c(-20,seq(-3,3,0.5),20)
+my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20)
+my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+## same but for SLP:
+my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100)
+my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors
+my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white"
+
+## save all the monthly anomaly maps:
+for(year.test in year.end){
+  for(month.test in period){
+    ##year.test <- 2016; month.test <- 10 # for the debug
+
+    pos.year.test <- year.test - year.start + 1
+
+    ## wind anomaly for the chosen year and month:
+    sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,]
+    sfcWindAnomPeriodRel <- sfcWindAnomRel[pos.year.test, pos.period(1,month.test),,]
+
+    sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriodRel, c(2,3), mean, na.rm=TRUE)
+
+    ## psl anomaly for the chosen year and month:
+    slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,]
+
+    slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+    fileoutput <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png")
+
+    png(filename=fileoutput,width=2800,height=1000)
+
+    ## reset par to its default values, because drawing with PlotEquiMap() alters some par values:
+    if(year.test == year.end && month.test == period) { op <- par(no.readonly = TRUE) }
+
+    par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE)
+
+    PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+    par(fig=c(0.03, 0.36, 0.00, 0.1), new=TRUE)
+
ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("%", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + par(op) # reset par parameters + par(fig=c(0.37, 1, 0, 0.1), new=TRUE) + ##ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(brks=my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=3, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + dev.off() + + ## same image formatted for the catalogue: + fileoutput2 <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + ## mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(workdir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + ## arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + ##cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + ## wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriodRel[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriodRel[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriodRel[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriodRel[which(cluster.test == 4),,,drop=FALSE] + + ## in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + ## load 
regime names:
+    load(paste0(workdir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData"))
+
+    ## add a strip with the daily sequence of WRs:
+
+    mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name))
+    mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name))
+    mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name))
+    mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name))
+
+    cluster1.name.short <- substr(cluster1.name,1,1)
+    cluster2.name.short <- substr(cluster2.name,1,1)
+    cluster3.name.short <- substr(cluster3.name,1,1)
+    cluster4.name.short <- substr(cluster4.name,1,1)
+
+    ## add + or - at the end of the short cluster name, if it is a NAO+ or NAO- regime:
+    if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1)
+    if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2)
+    if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3)
+    if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4)
+
+    c1 <- which(cluster.test == 1)
+    c2 <- which(cluster.test == 2)
+    c3 <- which(cluster.test == 3)
+    c4 <- which(cluster.test == 4)
+
+    cluster.test.letters <- cluster.test
+    cluster.test.letters[c1] <- cluster1.name.short
+    cluster.test.letters[c2] <- cluster2.name.short
+    cluster.test.letters[c3] <- cluster3.name.short
+    cluster.test.letters[c4] <- cluster4.name.short
+
+    my.strip <- cluster.test.letters
+
+    if(no.regimes) {
+      cluster.test.letters2 <- cluster.test.letters
+      cluster.test.letters2[which(cluster.test.letters == "N+")] <- "C1"
+      cluster.test.letters2[which(cluster.test.letters == "N-")] <- "C2"
+      cluster.test.letters2[which(cluster.test.letters == "B")] <- "C3"
+      cluster.test.letters2[which(cluster.test.letters == "A")] <- "C4"
+      my.strip <- cluster.test.letters2
+    }
+
+    cluster.col <- cluster.test.letters
+    cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1"
+    cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1"
+    cluster.col[which(cluster.test.letters == "B")] <- "White"
+    cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1"
+
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+
+    assign(paste0("fre.days",cluster1), fre1.days)
+    assign(paste0("fre.days",cluster2), fre2.days)
+    assign(paste0("fre.days",cluster3), fre3.days)
+    assign(paste0("fre.days",cluster4), fre4.days)
+
+    assign(paste0("fre",cluster1), wr1y)
+    assign(paste0("fre",cluster2), wr2y)
+    assign(paste0("fre",cluster3), wr3y)
+    assign(paste0("fre",cluster4), wr4y)
+
+    assign(paste0("imp.test",cluster1), sfcWindMean1)
+    assign(paste0("imp.test",cluster2), sfcWindMean2)
+    assign(paste0("imp.test",cluster3), sfcWindMean3)
+    assign(paste0("imp.test",cluster4), sfcWindMean4)
+
+    ## psl anomaly for the chosen year, month and cluster:
+    slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+    slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+    slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+    slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+    ## in case a regime has NO days in that month, we must set it to NA:
+    if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+    if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+    if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+    if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+
+    slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE)
+    slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE)
+    slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE)
+    slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE)
+
+    assign(paste0("psl.test",cluster1), slpMean1)
+    assign(paste0("psl.test",cluster2), slpMean2)
+    assign(paste0("psl.test",cluster3), slpMean3)
+    assign(paste0("psl.test",cluster4), slpMean4)
+
+    ## save the strip with the daily regime series for the chosen month and year:
+    fileoutput.seq <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png")
+    png(filename=fileoutput.seq,width=1500,height=1850)
+
+    plot.new()
+
+    sep <- 0.03
+    for(day in 1:n.days.in.a.period(month.test, 2001)){
+      sep.cum <- (day-1)*sep
+      polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day])
+      text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5)
+      text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=my.strip[day], cex=2)
+    }
+
+    dev.off()
+
+    ## save the average impact and sea level pressure only for the chosen month and year:
+    fileoutput.test <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png")
+
+    png(filename=fileoutput.test,width=1500,height=2000)
+
+    plot.new()
+
+    par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE)
+    PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+    par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE)
+    PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+    par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE)
+    PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+    par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE)
+    PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+
+    if(no.regimes) { regime.title <- paste0("Cluster",1:4) } else { regime.title <- orden }
+
+    par(fig=c(0,0.33,0.955,0.975), new=TRUE)
+    mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2)
+    par(fig=c(0,0.33,0.725,0.745), new=TRUE)
+    mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2)
+    par(fig=c(0,0.33,0.495,0.515), new=TRUE)
+    mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2)
+    par(fig=c(0,0.33,0.265,0.285), new=TRUE)
+    mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2)
+
+    par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE)
+    ##ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+    ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)]), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=2, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) #triangle_ends=c(T,T)) #, subset=my.subset2)
+
par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("%", cex=1.6) + + ## right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " SLP anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + ##ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=2, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + ##par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + ##mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + dev.off() + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part of the strip + system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both)) + + + ## same image formatted for the catalogue: + fileoutput2.test <- 
paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+    system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test))
+
+    system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput))
+
+  } # close for on month.test
+} # close for on year.test
+
+
+####################################################################################
+#             Create weather regimes maps for the last 37 years                    #
+####################################################################################
+
+composition <- "edpr"
+ordering <- TRUE    # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
+save.names <- FALSE # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- FALSE     # FALSE: save the results as one .png file for each season/month; TRUE: save only one .pdf file with all seasons/months
+mean.freq <- TRUE   # if TRUE, when composition is "simple" or "edpr", also add the mean frequency value over each frequency plot
+
+for(p in period){
+  print(paste("p =",p))
+
+  ## load the regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+  impact.data <- FALSE
+
+  load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+
+  ## load the impact data only if it is available; if not, it will leave an empty space in the composition:
+  if(file.exists(paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+    impact.data <- TRUE
+    print(paste0("Impact data for variable ",var.name[var.num]," available for reanalysis ", rean.name))
+    load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+  } else {
+    impact.data <- FALSE
+    print(paste0("Impact data for variable ",var.name[var.num]," not available for reanalysis ", rean.name))
+  }
+
+  ## load the cluster names from the file already saved, if the regimes come from a reanalysis:
+  ClusterName.file <- paste0(workdir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")
+  if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check if the file exists or not
+  load(ClusterName.file) # load cluster names
+
+  ## assign to each cluster its regime:
+  if(ordering == FALSE){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc.
+ } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(impact.data == TRUE){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("impRel",cluster1), varwr1meanRel) + assign(paste0("impRel",cluster2), varwr2meanRel) + assign(paste0("impRel",cluster3), varwr3meanRel) + assign(paste0("impRel",cluster4), varwr4meanRel) + } + + ## position of long values of Europe only (without the Atlantic Sea and America): + ##if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + ## breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" # add white in the middle + + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + ##my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + ##my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max <- 100 + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + + ## adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--red colors + + ## same but for SLP: + my.brks <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" ## add white in the middle + + fileoutput <- 
paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + y1 <- 0.10 + y3 <- 0.315 + y5 <- 0.53 + y7 <- 0.745 + y.width <- 0.18 + + y2 <- y1 + y.width; y4 <- y3 + y.width; y6 <- y5 + y.width; y8 <- y7 + y.width + yt1 <- y2+0.003; yt3 <- y4+0.003; yt5 <- y6+0.003; yt7 <- y8+0.003 + yt2 <- yt1 + 0.004; yt4 <- yt3 + 0.005; yt6 <- yt5 + 0.005; yt8 <- yt7 + 0.005 + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + + par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE) + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 3 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.045, 0.085), new=TRUE) + ##ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + ColorBar(my.brks[2:(l(my.brks)-1)], cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, label_scale=legend1.cex, bar_limits=c(my.brks[2], my.brks[l(my.brks)-1]), col_inf=my.cols[1], col_sup=my.cols[l(my.cols)], subsample=1) + + if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} ## las=1 is to display in horizontal instead of vert + + ## Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, title1.xpos + title1.width, yt7+0.0025, yt7+0.0075), new=TRUE) + mtext(paste0(regime.title[1]," ", psl.name, " anomaly"), font=2, cex=4) + + par(fig=c(title1.xpos, title1.xpos + title1.width, yt5+0.0025, yt5+0.0075), new=TRUE) + mtext(paste0(regime.title[2]," ", psl.name, " anomaly"), font=2, cex=4) + + par(fig=c(title1.xpos, title1.xpos + title1.width, yt3+0.0025, yt3+0.0075), new=TRUE) + mtext(paste0(regime.title[3]," ", psl.name, " anomaly"), font=2, cex=4) + + par(fig=c(title1.xpos, title1.xpos + title1.width, yt1+0.0025, yt1+0.0075), new=TRUE) + mtext(paste0(regime.title[4]," ", psl.name, " anomaly"), font=2, cex=4) + + ## Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, y7, y8), new=TRUE) + PlotEquiMap2(impRel1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + par(fig=c(impact.xpos, 
impact.xpos + impact.width, y5, y6), new=TRUE) + PlotEquiMap2(impRel2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + par(fig=c(impact.xpos, impact.xpos + impact.width, y3, y4), new=TRUE) + PlotEquiMap2(impRel3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + par(fig=c(impact.xpos, impact.xpos + impact.width, y1, y2), new=TRUE) + PlotEquiMap2(impRel4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + ## Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " impact on ", var.name.full[var.num]), font=2, cex=4) + + par(fig=c(title2.xpos, title2.xpos + title2.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " impact on ", var.name.full[var.num]), font=2, cex=4) + + par(fig=c(title2.xpos, title2.xpos + title2.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " impact on ", var.name.full[var.num]), font=2, cex=4) + + par(fig=c(title2.xpos, title2.xpos + title2.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " impact on ", var.name.full[var.num]), font=2, cex=4) + + ## Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 3 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.045, 0.085), new=TRUE) + + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) + ##mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex, las=1) + mtext(side=4,"%", cex=legend2.cex, las=1) + + } + + ## Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y7, y8-0.01), new=TRUE) + barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y5, y6-0.01), new=TRUE) + barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y3, y4-0.01), new=TRUE) + barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y1, y2-0.01), new=TRUE) + barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + ## Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+  par(fig=c(title3.xpos, title3.xpos + title3.width, yt5, yt6), new=TRUE)
+  mtext(paste0(regime.title[2], " Frequency"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, yt3, yt4), new=TRUE)
+  mtext(paste0(regime.title[3], " Frequency"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, yt1, yt2), new=TRUE)
+  mtext(paste0(regime.title[4], " Frequency"), font=2, cex=4)
+
+  ## % symbols on the frequency plots:
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y4+0.425, y4+0.425+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.39, y3+0.39+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y2+0.21, y2+0.21+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.17, y1+0.17+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+
+  ## mean frequency over the frequency plots:
+  if(mean.freq == TRUE){
+    symbol.xpos <- 0.83
+    symbol.width <- 0.1
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y7+0.163, y7+0.163+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y5+0.163, y5+0.163+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.165, y3+0.165+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.163, y1+0.163+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5)
+  }
+
+  ## subtitles of the frequency plots:
+  map.title.xpos <- 0.96
+  map.title.width <- 0.04
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y7-0.013, y7-0.013+0.001), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y5-0.013, y5-0.013+0.001), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y3-0.013, y3-0.013+0.001), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y1-0.013, y1-0.013+0.001), new=TRUE)
+  mtext("year", cex=3)
+
+  if(!as.pdf) dev.off() # close the .png of this period (one file per season/month)
+
+  ## same image formatted for the catalogue:
+  fileoutput2 <- paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png")
+
+  system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -m 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2))
+
+  system(paste0("rm ", fileoutput))
+
+} # close 'p' on 'period'
+
+print("Finished!")
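+
+# Illustrative note (an assumption based on the two-argument convention at the top of
+# this script, not part of the original workflow): to produce the maps for all 12
+# months of, say, 2017, one could launch from the shell something like
+#   for m in $(seq 1 12); do Rscript ~/scripts/weather_regimes_maps.R 2017 $m; done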
+# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load() and InsertDim()
+library(abind)           # for function abind()
+library(Kendall)         # for the MannKendall test
+library(reshape2)        # for functions acast() and melt(). WARNING: after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken!
+library(RColorBrewer)    # for brewer.pal(), used below to build the color scales of the maps
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+
+rean <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.name <- "JRA-55"
+
+psl <- "psl" #"g500"  # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981
+year.end <- 2017
+
+LOESS <- TRUE # whether to apply the LOESS filter to the daily climatology or not
+running.cluster <- FALSE # add to the clustering also the daily SLP data of the two months adjacent to the target month (only for monthly analysis)
+running.15 <- FALSE # add to the clustering also the daily SLP data of the closest 15 days of the two adjacent months (only for monthly analysis).
+                    # You cannot set both this variable and 'running.cluster' above to TRUE
+lat.weighting <- TRUE # set it to TRUE to weight psl data on latitude before applying the cluster analysis
+subdaily <- FALSE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available)
+
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81    #81 #80 #70
+lat.min <- 27    #30 #20 #30
+lon.max <- 45    #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional arguments of the script]
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4"
+                   # (when composition='edpr' or monthly_anomalies = TRUE)
+
+#############################################################################################################################
+
+# In case the script is run with two external arguments, the first one is taken as the last year to process (year.end) and the second one as the period (month) to classify:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) >= 2){
+  year.end <- script.arg[1]
+  period <- script.arg[2]
+}
+
+n.years <- year.end - year.start + 1
+year.end.clim <- year.end - 1 # last year for the computation of the climatology (unless year.end has data available for all months, it's better to set it to the previous year)
+n.years.clim <- year.end.clim - year.start + 1
+
+if(running.cluster && running.15) stop("both 'running.cluster' and 'running.15' cannot be TRUE simultaneously")
+
+cat(paste0("Rean: ", rean.name, " Period: ", period, " Year: ", year.end, "\n"))
+
+if(no.regimes) { regime.title <- paste0("Cluster",1:4) } else { regime.title <- orden }
+
+## create working directory:
+workdir <- paste0("/shared/earth/EarthSystemServices/EDPR/", year.end, "_", my.month[period]) # working dir where output files will be generated
+dir.create(file.path(workdir))
+
+n.map <- 0
+
+cat("Loading lat/lon. Please wait...\n")
+
+## load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(list(path=rean)), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than or equal to lat.max")
+
+#################### Start analysis #######################################################
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading SLP data. Please wait...\n")
+
+psleuFull <- array(NA,c(1, 1, n.years, 365, n.pos.lat, n.pos.lon))
+
+psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
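## Note on the array layout assumed from here on: s2dverification's Load() returns observations
## with dimensions [dataset, member, sdate, leadtime, lat, lon]; in this script sdate is the
## year and leadtime is the day of the year, which is why single fields are always addressed
## as psleuFull[1, 1, year, day, , ]. A quick sanity check after the Load() call above:
## dim(psleuFull366$obs)   # expected: 1 1 n.years 366 n.pos.lat n.pos.lon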
+## remove leap days so that all arrays have the same dimensions:
+cat("Removing leap days. Please wait...\n")
+psleuFull[,,,,,] <- psleuFull366$obs[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366$obs[,,y2,61:366,,] # take the March-to-December period removing the 29th of February
+}
+
+psleuFull366$obs <- NULL
+gc()
+
+if((running.cluster || running.15)) {
+  ## The months after the last month of the last year loaded are automatically filled with NA by Load(); however, the clustering algorithm cannot handle NA,
+  ## so we have to replace them with some values; here we use the mean of the previous years, which barely affects the analysis and still lets us
+  ## compute the running clustering.
+  ## WARNING: in case your last month is December, for the running clustering it will use the January data of the last year loaded, not of the next year:
+  ## psleuFull[,,n.years,305:334,,] <- psleuFull[,,n.years-1,305:334,,]
+
+  month.NA <- psleuFull366$not_found_files[1] # filename of the file with the first month with NA (string value with full file path)
+  first.month.NA <- as.integer(substr(month.NA,nchar(month.NA)-4,nchar(month.NA)-3)) # first month with NA
+  first.day.NA <- pos.period(1,first.month.NA)[1] # first day with NA (counting from the start of the year)
+
+  psleuFullnolast <- psleuFull[,,1:(n.years-1),first.day.NA:365,,]
+  psleuFullMean <- apply(psleuFullnolast, c(2,3,4), mean, na.rm=TRUE) # mean of the previous years
+
+  psleuFull[,,n.years,first.day.NA:365,,] <- psleuFullMean
+
+  rm(psleuFullnolast, psleuFullMean)
+
+}
+
+## convert psl to daily anomalies with the LOESS filter:
+cat("Calculating anomalies. Please wait...\n")
+psleuFull_no_last_year <- psleuFull[1,1,1:n.years.clim,,,,drop=FALSE]
+pslPeriodClim <- apply(psleuFull_no_last_year, c(1,2,4,5,6), mean, na.rm=T)
+rm(psleuFull_no_last_year)
+gc()
+
+if(LOESS == TRUE){
+  pslPeriodClimLoess <- array(NA, dim(pslPeriodClim))
+
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+} else { # keep the raw daily climatology to compute the anomalies:
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+  rm(pslPeriodClim)
+  gc()
+}
+
+psleuFull <- psleuFull - pslPeriodClim2
+rm(pslPeriodClim2)
+gc()
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100   # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+for(p in period){
+  ## Select only the data of the month/season we want:
+
+  ## pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+  if(running.cluster) {
+    if(subdaily){
+      my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4))
+      if(p == 1) my.hours <- c(1337:1460,1:236)
+      if(p == 12) my.hours <- c(1217:1460,1:124)
+
+      pslPeriod <- psleuFull[,my.hours,,] # select all days in the period of 3 months centered on the target month p
+
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all days in the period of 3 months centered on the target month p
+    }
+
+  }
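## The helper pos.month.extended() above comes from Rfunctions.R and is not shown in this diff;
## the sketch below is an assumption of what such a helper could look like, for illustration
## only: it returns the day-of-year positions of the 3-month window centred on month p in a
## 365-day year, wrapping around the year boundary for January and December.
pos.month.extended.sketch <- function(p){
  month.len <- c(31,28,31,30,31,30,31,31,30,31,30,31)  # 365-day year (29th of February removed)
  month.end <- cumsum(month.len)                       # last day of the year of each month
  month.start <- month.end - month.len + 1             # first day of the year of each month
  prev <- ifelse(p == 1, 12, p - 1)                    # December precedes January
  nxt <- ifelse(p == 12, 1, p + 1)                     # January follows December
  c(month.start[prev]:month.end[prev], month.start[p]:month.end[p], month.start[nxt]:month.end[nxt])
}
## e.g. pos.month.extended.sketch(1) returns the days of December, January and February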
+  if(running.15) pslPeriod <- psleuFull[1,1,,pos.month.extended15(1,p),,]
+
+  ## if there isn't any kind of running clustering:
+  if(!running.cluster && !running.15) {
+    if(subdaily){
+      my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4))
+      pslPeriod <- psleuFull[,my.hours,,]
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only days in the chosen period (i.e: winter)
+    }
+  }
+
+  ## weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    if(!running.cluster && !running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.period(2001,p))), 1, n.years)
+    if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+    if(running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended15(2001,p))), 1, n.years)
+
+    pslPeriod <- pslPeriod * lat.weighted.array
+
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  gc()
+  cat("Preformatting data for clustering. Please wait...\n")
+  psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+  gc()
+
+  cat("Preformatting data for clustering. Please wait......\n")
+
+  ## this function eats a lot of memory!!!
+  psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+  gc()
+
+  # compute the clustering:
+  cat("Clustering data. Please wait.........\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # keep only 1 grid point out of every 16
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA <- princomp(pslcut,cor=FALSE)
+    tot.variance <- head(cumsum(my.PCA$sdev^2 / sum(my.PCA$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+    n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the PCs that together explain at least the chosen fraction of variance (80% by default)
+    my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+
+  } else { # 4 is the number of clusters:
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+    gc()
+
+  }
+
+  rm(psl.kmeans)
+  gc()
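## The PCA truncation above keeps the first PCs whose cumulative explained variance exceeds
## 'variance.explained'; a standalone sketch of the same criterion on random data:
set.seed(1)
X <- matrix(rnorm(200*50), nrow=200)           # 200 "days" x 50 "grid points"
pca <- princomp(X, cor=FALSE)
expl <- cumsum(pca$sdev^2 / sum(pca$sdev^2))   # cumulative fraction of explained variance
n.pcs.demo <- which(expl > 0.8)[1]             # number of PCs retained for the k-means step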
+  cat("Extracting monthly regimes. Please wait............\n")
+
+  if(running.cluster && p < 13){
+    ## select only the days inside the 3-month cluster series that belong to the target month p:
+    n.days.period <- length(pos.month.extended(1,p)) # total number of days in the three months
+
+    days.month <- days.month.new <- days.month.full <- c()
+    days.month <- which(pos.month.extended(1,p) == pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p))
+
+    for(y in 1:n.years){
+      days.month.new <- days.month + n.days.period*(y-1)
+      days.month.full <- c(days.month.full, days.month.new)
+      ## print(days.month.new)
+      ## print(days.month)
+    }
+
+    ## in this case, we are not selecting days but 6-hourly intervals:
+    if(subdaily) days.month.full <- sort(c(days.month.full*4, days.month.full*4+1, days.month.full*4+2, days.month.full*4+3))
+
+    ## select only the days of the cluster series inside the target month p:
+    cluster.sequence <- my.cluster$cluster[days.month.full]
+
+  }
+
+  if(running.15 && p < 13){
+    ## select only the days inside the 3-month cluster series that belong to the target month p:
+    n.days.period <- length(pos.month.extended15(1,p)) # total number of days in the three months
+
+    days.month <- days.month.new <- days.month.full <- c()
+    days.month <- which(pos.month.extended15(1,p) == pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p))
+
+    for(y in 1:n.years){
+      days.month.new <- days.month + n.days.period*(y-1)
+      days.month.full <- c(days.month.full, days.month.new)
+      ##print(days.month.new)
+      ##print(days.month)
+    }
+
+    ## select only the days of the cluster series inside the target month p:
+    cluster.sequence <- my.cluster$cluster[days.month.full]
+
+  }
+
+  if(!running.cluster && !running.15) cluster.sequence <- my.cluster$cluster
+
+  ## convert the cluster sequence from 6-hourly to daily:
+  if(subdaily){
+    type <- c()
+
+    for(day in 1:(length(cluster.sequence)/4)){
+      #day <- ceiling(hour/4)
+      hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)]
+
+      t1 <- length(which(hourly == 1))
+      t2 <- length(which(hourly == 2))
+      t3 <- length(which(hourly == 3))
+      t4 <- length(which(hourly == 4))
+      tt <- c(t1,t2,t3,t4)
+
+      ## if all the 4 time steps belong to the same regime, assign it to this day:
+      if(length(unique(hourly)) == 1) type[day] <- hourly[1]
+
+      ## if there are two different regimes, check if one has a higher frequency:
+      if(length(unique(hourly)) == 2){
+        if(any(tt == 3)){ # if 3 of the 4 time intervals belong to the same weather regime, assign this day to it
+          type[day] <- which(tt == 3)
+        } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrarily assign the regime occurring at 12.00 of that day
+          type[day] <- hourly[3]
+        }
+      }
+
+      ## if there are three different regimes, assign it to the only possible regime with 2 time steps in that day:
+      if(length(unique(hourly)) == 3) type[day] <- which(tt == 2)
+
+      ## if there are four different regimes (a very rare event!), assign it to the regime occurring at 12.00 of that day:
+      if(length(unique(hourly)) == 4) type[day] <- hourly[3]
+
+    } # close for on day
+
+  } # close if on subdaily
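## A standalone check of the 6-hourly-to-daily majority rule above, on one toy day
## (the values are made up for illustration):
hourly.demo <- c(2, 2, 3, 2)                           # the four 6-hourly regime labels of one day
tt.demo <- tabulate(hourly.demo, nbins=4)              # occurrences of regimes 1-4: 0 3 1 0
if(any(tt.demo == 3)) day.type <- which(tt.demo == 3)  # 3 of 4 steps agree -> the day is regime 2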
+  if(sequences){ # it works only for reanalysis data!!!
+    ## for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    ## warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    ## sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster$cluster * sequ
+    rm(wrdiff, sequ)
+  } # close if on sequences
+
+  ## Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  ## yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+
+  mod.subdaily <- ifelse(subdaily,4,1)
+  np <- n.days.in.a.period(p,1)*mod.subdaily
+
+  for(y in year.start:year.end){
+    ## for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+    wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+    wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+    wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+    wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+  }
+
+  ## convert the counts to relative frequencies (0-1; they are multiplied by 100 when plotted):
+  wr1y <- wr1y/np
+  wr2y <- wr2y/np
+  wr3y <- wr3y/np
+  wr4y <- wr4y/np
+
+  gc()
+
+  cat("Extracting monthly regime structure. Please wait...............\n")
+
+  ## measure regime anomalies:
+  pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+  if(running.cluster || running.15){
+    pslmat.new <- pslmat
+    pslmat <- pslmat.new[days.month.full,,]
+    rm(pslmat.new)
+    gc()
+  }
+
+  pslwr1 <- pslmat[wr1,,, drop=F]
+  pslwr2 <- pslmat[wr2,,, drop=F]
+  pslwr3 <- pslmat[wr3,,, drop=F]
+  pslwr4 <- pslmat[wr4,,, drop=F]
+
+  ## regime structure:
+  pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+  pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+  pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+  pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+  ## spatial mean for each day, saved for the computation of the FairRPSS:
+  pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+  pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+  pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+  pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
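## Worked check of the frequency normalisation above (illustrative values only): with np = 31
## days in the target month and 12 of them assigned to cluster 1 in a given year, the entry of
## wr1y for that year is 12/31 = 0.387, drawn later in the bar plots as 38.7%:
np.demo <- 31; 12/np.demo   # 0.3871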
+  cat("Saving clusters. Please wait..................\n")
+
+  ## save all the data necessary to redraw the graphs when we know the right regime:
+  save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, p, workdir, rean.name, year.start, year.end, n.years, period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,wr1,wr2,wr3,wr4, LOESS, running.cluster, running.15, lat.weighting, file=paste0(workdir,"/",rean.name,"_",my.period[p],"_psl.RData"))
+
+  # cat("Finished!\n")
+} # close the for loop on 'p'
+
+rm(psleuFull, psl.melted, pslmat, pslPeriod, pslPeriodClimLoess)
+gc()
+
+################################################################################
+#                           Measure impact maps                                #
+################################################################################
+cat("Measuring impact. Please wait.....................\n")
+
+# Load var data:
+info.period <- period[1] # period used to get the variables n.years, lat.min, lat.max, lon.min and lon.max
+period.old <- period
+
+vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs
+
+vareuFull <- array(NA,c(1,1,n.years,365, dim(vareuFull366)[5], dim(vareuFull366)[6]))
+
+## remove leap days so that all arrays have the same dimensions:
+cat("Removing leap days. Please wait...\n")
+vareuFull[,,,,,] <- vareuFull366[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) vareuFull[,,y2,60:365,,] <- vareuFull366[,,y2,61:366,,] # take the March-to-December period removing the 29th of February
+}
+
+rm(vareuFull366)
+gc()
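## The LOESS smoothing used throughout the script (span=0.35 over the 365-day mean annual cycle)
## can be seen in isolation on synthetic data; a minimal sketch:
clim.demo <- 10 + 5*sin(2*pi*(1:365)/365) + rnorm(365)               # noisy daily climatology
fit.demo <- loess(clim ~ day, data.frame(clim=clim.demo, day=1:365), span=0.35)
clim.smooth <- predict(fit.demo)                                     # smooth annual cycle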
+## convert var to daily anomalies with the LOESS filter:
+cat("Calculating anomalies. Please wait...\n")
+varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+varPeriodClimLoess <- varPeriodClim
+for(i in 1:n.pos.lat){
+  for(j in 1:n.pos.lon){
+    my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365)
+    my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+    varPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+  }
+}
+
+rm(varPeriodClim)
+gc()
+
+varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years)
+
+vareuFull <- vareuFull - varPeriodClim2
+##rm(varPeriodClim2)
+gc()
+
+for(p in period){
+  ##p=1 # for the debug
+
+  ## load regime data for the chosen month:
+  varPeriod <- vareuFull[1,1,,pos.period(1,p),,] # select only var data during the chosen period
+  varPeriodClim <- varPeriodClim2[1,1,,pos.period(1,p),,]
+  varPeriodRel <- varPeriod / varPeriodClim
+
+  load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_psl.RData"))
+
+  cluster.sequence <- my.cluster$cluster # old syntax: my.cluster[[p]]$cluster
+
+  seasonal.data <- ifelse(length(my.cluster$cluster)/n.years > 33, TRUE, FALSE) # if there are more than 33 days per year, it means that we are loading sequences of 3 months
+  if(seasonal.data == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+    n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+    days.month <- days.month.new <- days.month.full <- c()
+    days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+    for(y in 1:n.years){
+      days.month.new <- days.month + n.days.period*(y-1)
+      days.month.full <- c(days.month.full, days.month.new)
+    }
+
+    cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+  }
+
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  gc()
+
+  cat("Formatting var data. Please wait...\n")
+  var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+  var.meltedRel <- melt(varPeriodRel[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+  gc()
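## melt() above and acast() just below flatten the [Year, Day, Lat, Lon] anomaly array into a
## days-by-space structure; the same reshape2 idiom on a tiny toy array:
library(reshape2)
a.demo <- array(1:8, c(2,2,2))                           # toy [Year, Day, Lat] block
m.demo <- melt(a.demo, varnames=c("Year","Day","Lat"))   # long-format data frame
acast(m.demo, Year + Day ~ Lat)                          # rows = year/day pairs, columns = space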
+  cat("Formatting var data. Please wait......\n")
+  varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon))
+  varmatRel <- unname(acast(var.meltedRel, Year + Day ~ Lat ~ Lon))
+
+  varwr1 <- varmat[wr1,,,drop=F]
+  varwr2 <- varmat[wr2,,,drop=F]
+  varwr3 <- varmat[wr3,,,drop=F]
+  varwr4 <- varmat[wr4,,,drop=F]
+
+  varwr1Rel <- varmatRel[wr1,,,drop=F]
+  varwr2Rel <- varmatRel[wr2,,,drop=F]
+  varwr3Rel <- varmatRel[wr3,,,drop=F]
+  varwr4Rel <- varmatRel[wr4,,,drop=F]
+
+  varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T)
+  varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T)
+  varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T)
+  varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T)
+
+  varwr1meanRel <- apply(varwr1Rel,c(2,3),mean,na.rm=T)
+  varwr2meanRel <- apply(varwr2Rel,c(2,3),mean,na.rm=T)
+  varwr3meanRel <- apply(varwr3Rel,c(2,3),mean,na.rm=T)
+  varwr4meanRel <- apply(varwr4Rel,c(2,3),mean,na.rm=T)
+
+  n.datos <- n.years * n.days.in.a.period(p,2001)
+
+  varwrBoth1 <- abind(varmat, varwr1, along = 1)
+  pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth1, varwr1)
+  gc()
+
+  varwrBoth2 <- abind(varmat, varwr2, along = 1)
+  pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth2, varwr2)
+  gc()
+
+  varwrBoth3 <- abind(varmat, varwr3, along = 1)
+  pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth3, varwr3)
+  gc()
+
+  varwrBoth4 <- abind(varmat, varwr4, along = 1)
+  pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth4, varwr4)
+
+  rm(varmat, varmatRel)
+  gc()
+
+  ## save all the data necessary to redraw the graphs when we know the right regime:
+  save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, varwr1meanRel, varwr2meanRel, varwr3meanRel, varwr4meanRel, file=paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+  gc()
+
+} # close p on period
+
+rm(vareuFull, varPeriodClim2, var.melted, var.meltedRel, varPeriod, varPeriodClim, varPeriodRel, varPeriodClimLoess)
+gc()
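## The significance test above compares, at every grid point, the anomalies of all days against
## the anomalies of the days assigned to one regime; a standalone sketch with made-up numbers:
all.days <- rnorm(300)                      # anomaly series of one grid point, all days
regime.days <- rnorm(70, mean=0.4)          # subset of days assigned to one regime
t.test(all.days, regime.days)$p.value       # p < 0.05 -> a dot is drawn on the impact map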
+#######################################################################################################
+#                            Order clusters for explained variance                                    #
+#######################################################################################################
+
+### order clusters for variance (as in composition == "variance") and save them with the '_ClusterNames' suffix:
+cat("Ordering clusters for variance. Please wait........................\n")
+
+ordering <- TRUE
+save.names <- TRUE
+as.pdf <- FALSE
+
+for(p in period){
+  ##p <- period[1] # for the debug
+  p.orig <- p
+
+  print(paste("p =",p))
+
+  ## load regime data
+  impact.data <- FALSE
+
+  load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+
+  ## load impact data only if it is available; if not, it will leave an empty space in the composition:
+  if(file.exists(paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+    impact.data <- TRUE
+    print(paste0("Impact data for variable ",var.name[var.num]," available for reanalysis ", rean.name))
+
+    load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+
+  } else {
+    impact.data <- FALSE
+    print(paste0("Impact data for variable ",var.name[var.num]," not available for reanalysis ", rean.name))
+  }
+
+  my.cluster2 <- my.cluster # create a copy of my.cluster
+
+  ss1 <- which(my.cluster$cluster == 1)
+  ss2 <- which(my.cluster$cluster == 2)
+  ss3 <- which(my.cluster$cluster == 3)
+  ss4 <- which(my.cluster$cluster == 4)
+
+  withinss <- my.cluster$withinss
+  max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # first cluster with maximum variance
+  withinss[max1] <- NA
+
+  max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # second cluster with maximum variance
+  withinss[max2] <- NA
+
+  max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # third cluster with maximum variance
+  withinss[max3] <- NA
+
+  max4 <- which(!is.na(withinss))
+  rm(withinss)
+
+  ## vector whose first element tells you which cluster has the maximum variance, whose second element shows which cluster has the
+  ## second maximum variance, and so on:
+  max.seq <- c(max1, max2, max3, max4)
+
+  assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+)
+  assign(paste0("cluster",max2,".name"), orden[2])
+  assign(paste0("cluster",max3,".name"), orden[3])
+  assign(paste0("cluster",max4,".name"), orden[4])
+
+  save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+}
+
+#########################################################################################
+#                                    Create maps                                        #
+#########################################################################################
+
+## Compute climatology and anomalies (with LOESS filter) for sfcWind data, and draw monthly anomalies, if you want to compare the mean daily wind speed anomalies reconstructed by the weather regimes classification with those observed by the reanalysis:
+
+load(paste0(workdir,"/",rean.name,"_",my.period[period],"_psl.RData")) # only to load year.start and year.end
+
+sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs
+
+sfcWind <- sfcWind366[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period removing the 29th of February
+}
+
+rm(sfcWind366)
+gc()
+
+## LOESS anomalies:
+sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+sfcWindClimLoess <- sfcWindClimDaily
+for(i in 1:n.pos.lat){
+  for(j in 1:n.pos.lon){
+    my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+    my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+    sfcWindClimLoess[,i,j] <- predict(my.loess)
+  }
+}
+
+rm(sfcWindClimDaily)
+gc()
+
+sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+sfcWindAnom <- sfcWind - sfcWindClim2
+sfcWindAnomRel <- sfcWindAnom / sfcWindClim2
+
+rm(sfcWindClimLoess)
+rm(sfcWind)
+gc()
+
+## same as above but computing the climatology and anomalies (with LOESS filter) for psl data, to draw the monthly slp anomalies:
+slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs
+
+slp <- slp366[,,,1:365,,]
+
+for(y in year.start:year.end){
+  y2 <- y - year.start + 1
+  if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the March-to-December period removing the 29th of February
+}
+
+rm(slp366)
+gc()
+
+slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T)
+
+slpClimLoess <- slpClimDaily
+for(i in 1:n.pos.lat){
+  for(j in 1:n.pos.lon){
+    my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365)
+    my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+    slpClimLoess[,i,j] <- predict(my.loess)
+  }
+}
+
+rm(slpClimDaily)
+gc()
+
+slpClim2 <- InsertDim(slpClimLoess, 1, n.years)
+slpAnom <- slp - slpClim2
+
+rm(slpClimLoess)
+rm(slp)
+gc()
+
+if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa
+
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+## color scale for the impact maps:
+## my.brks.var <- c(-20,seq(-3,3,0.5),20)
+my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20)
+my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+## same but for slp:
+my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100)
+my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors
+my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white"
+
+## save all monthly anomaly maps:
+for(year.test in year.end){
+  for(month.test in period){
+    ##year.test <- 2016; month.test <- 10 # for the debug
+
+    pos.year.test <- year.test - year.start + 1
+
+    ## wind anomaly for the chosen year and month:
+    sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,]
+    sfcWindAnomPeriodRel <- sfcWindAnomRel[pos.year.test, pos.period(1,month.test),,]
+
+    sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriodRel, c(2,3), mean, na.rm=TRUE)
+
+    ## psl anomaly for the chosen year and month:
+    slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,]
+
+    slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+    fileoutput <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png")
+
+    png(filename=fileoutput,width=2800,height=1000)
+
+    ## reset par to its default values, because drawing with PlotEquiMap() alters some par values:
+    if(year.test == year.end && month.test == period) { op <- par(no.readonly = TRUE) }
+
+    par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE)
+
+    PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+    par(fig=c(0.03, 0.36, 0.00, 0.1), new=TRUE)
+
ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("%", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + par(op) # reset par parameters + par(fig=c(0.37, 1, 0, 0.1), new=TRUE) + ##ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(brks=my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=3, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + dev.off() + + ## same image formatted for the catalogue: + fileoutput2 <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + ## mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(workdir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + ## arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + ##cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + ## wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriodRel[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriodRel[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriodRel[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriodRel[which(cluster.test == 4),,,drop=FALSE] + + ## in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + ## load 
regime names: + load(paste0(workdir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + ## add strip with daily sequence of WRs: + + mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name)) + mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name)) + mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name)) + mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name)) + + cluster1.name.short <- substr(cluster1.name,1,1) + cluster2.name.short <- substr(cluster2.name,1,1) + cluster3.name.short <- substr(cluster3.name,1,1) + cluster4.name.short <- substr(cluster4.name,1,1) + + ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime: + if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1) + if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2) + if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3) + if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4) + + c1 <- which(cluster.test == 1) + c2 <- which(cluster.test == 2) + c3 <- which(cluster.test == 3) + c4 <- which(cluster.test == 4) + + cluster.test.letters <- cluster.test + cluster.test.letters[c1] <- cluster1.name.short + cluster.test.letters[c2] <- cluster2.name.short + cluster.test.letters[c3] <- cluster3.name.short + cluster.test.letters[c4] <- cluster4.name.short + + my.strip <- cluster.test.letters + + if(no.regimes) { + cluster.test.letters2 <- cluster.test.letters + cluster.test.letters2[which(cluster.test.letters == "N+")] <- "C1" + cluster.test.letters2[which(cluster.test.letters == "N-")] <- "C2" + cluster.test.letters2[which(cluster.test.letters == "B")] <- "C3" + cluster.test.letters2[which(cluster.test.letters == "A")] <- "C4" + my.strip <- cluster.test.letters2 + } + + cluster.col <- cluster.test.letters + cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1" + cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1" + cluster.col[which(cluster.test.letters == "B")] <- "White" + cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1" + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("fre.days",cluster1), fre1.days) + assign(paste0("fre.days",cluster2), fre2.days) + assign(paste0("fre.days",cluster3), fre3.days) + assign(paste0("fre.days",cluster4), fre4.days) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + ## psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + ## in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + 
if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) + + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + ## save strip with the daily regime series for chosen month and year: + fileoutput.seq <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png") + png(filename=fileoutput.seq,width=1500,height=1850) + + plot.new() + + sep <- 0.03 + for(day in 1: n.days.in.a.period(p, 2001)){ + sep.cum <- (day-1)*sep + polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day]) + text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5) + text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=my.strip[day], cex=2) + } + + dev.off() + + ## save average impact and sea level pressure only for chosen month and year: + fileoutput.test <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1500,height=2000) + + plot.new() + + par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE) + PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE) + PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE) + PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE) + PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden} + + par(fig=c(0,0.33,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE) + ##ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)]), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=2, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) #triangle_ends=c(T,T)) #, subset=my.subset2) + + 
par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("%", cex=1.6) + + ## right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " SLP anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + ##ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=2, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + ##par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + ##mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + dev.off() + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(workdir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part of the strip + system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both)) + + + ## same image formatted for the catalogue: + fileoutput2.test <- 
paste0(workdir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test)) + + system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput)) + + + } # close for on year.test +} # close for on month.test + + +#################################################################################### +# Create weather regimes maps for last 37 years # +#################################################################################### + +composition <- "edpr" +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- FALSE # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months +mean.freq <- TRUE # if TRUE, when composition <- "simple" o 'edpr', it also add the mean frequency value over each frequency plot + +for(p in period){ + print(paste("p =",p)) + + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month') + impact.data <- FALSE + + load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + + + ## load impact data only if it is available, if not it will leave an empty space in the composition: + if(file.exists(paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print(paste0("Impact data for variable ",var.name[var.num] ," available for reanalysis ", rean.name)) + load(file=paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print(paste0("Impact data for variable ",var.name[var.num] ," not available for reanalysis ", rean.name)) + } + + + ## load the cluster names from the file already saved, if regimes come from a reanalysis: + ClusterName.file <- paste0(workdir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData") + if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check if file exists or not + load(ClusterName.file) # load cluster names + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc. 
+ } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(impact.data == TRUE){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("impRel",cluster1), varwr1meanRel) + assign(paste0("impRel",cluster2), varwr2meanRel) + assign(paste0("impRel",cluster3), varwr3meanRel) + assign(paste0("impRel",cluster4), varwr4meanRel) + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" # add white in the middle + + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + + ## adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--red colors + + ## same but for SLP: + my.brks <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols <- 
colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" ## add white in the middle + + fileoutput <- paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + y1 <- 0.10 + y3 <- 0.315 + y5 <- 0.53 + y7 <- 0.745 + y.width <- 0.18 + + y2 <- y1 + y.width; y4 <- y3 + y.width; y6 <- y5 + y.width; y8 <- y7 + y.width + yt1 <- y2+0.003; yt3 <- y4+0.003; yt5 <- y6+0.003; yt7 <- y8+0.003 + yt2 <- yt1 + 0.004; yt4 <- yt3 + 0.005; yt6 <- yt5 + 0.005; yt8 <- yt7 + 0.005 + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + + par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE) + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 3 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.045, 0.085), new=TRUE) + ##ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + ColorBar(my.brks[2:(l(my.brks)-1)], cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, label_scale=legend1.cex, bar_limits=c(my.brks[2], my.brks[l(my.brks)-1]), col_inf=my.cols[1], col_sup=my.cols[l(my.cols)], subsample=1) + + if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} ## las=1 is to display in horizontal instead of vert + + ## Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, title1.xpos + title1.width, yt7+0.0025, yt7+0.0075), new=TRUE) + mtext(paste0(regime.title[1]," ", psl.name, " anomaly"), font=2, cex=4) + + par(fig=c(title1.xpos, title1.xpos + title1.width, yt5+0.0025, yt5+0.0075), new=TRUE) + mtext(paste0(regime.title[2]," ", psl.name, " anomaly"), font=2, cex=4) + + par(fig=c(title1.xpos, title1.xpos + title1.width, yt3+0.0025, yt3+0.0075), new=TRUE) + mtext(paste0(regime.title[3]," ", psl.name, " anomaly"), font=2, cex=4) + + par(fig=c(title1.xpos, title1.xpos + title1.width, yt1+0.0025, yt1+0.0075), new=TRUE) + mtext(paste0(regime.title[4]," ", psl.name, " anomaly"), font=2, cex=4) + + ## Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, y7, y8), new=TRUE) + 
PlotEquiMap2(impRel1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + par(fig=c(impact.xpos, impact.xpos + impact.width, y5, y6), new=TRUE) + PlotEquiMap2(impRel2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + par(fig=c(impact.xpos, impact.xpos + impact.width, y3, y4), new=TRUE) + PlotEquiMap2(impRel3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + par(fig=c(impact.xpos, impact.xpos + impact.width, y1, y2), new=TRUE) + PlotEquiMap2(impRel4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + ## Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " impact on ", var.name.full[var.num]), font=2, cex=4) + + par(fig=c(title2.xpos, title2.xpos + title2.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " impact on ", var.name.full[var.num]), font=2, cex=4) + + par(fig=c(title2.xpos, title2.xpos + title2.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " impact on ", var.name.full[var.num]), font=2, cex=4) + + par(fig=c(title2.xpos, title2.xpos + title2.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " impact on ", var.name.full[var.num]), font=2, cex=4) + + ## Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 3 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.045, 0.085), new=TRUE) + + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) + ##mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex, las=1) + mtext(side=4,"%", cex=legend2.cex, las=1) + + } + + ## Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y7, y8-0.01), new=TRUE) + barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y5, y6-0.01), new=TRUE) + barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y3, y4-0.01), new=TRUE) + barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y1, y2-0.01), new=TRUE) + barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + ## Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
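+  ## Note on the labels further below: paste0() cannot be evaluated inside expression(), but
+  ## bquote() splices computed values into a plotmath expression via .( ); a minimal sketch
+  ## (illustrative only, with a hypothetical frequency vector 'fre'; the value is substituted
+  ## at plot time):
+  ##   fre <- c(0.102, 0.117)                                          # toy values
+  ##   lab <- bquote(bar(nu) == .(paste0(round(mean(100*fre),1),"%")))  # plotmath: nu-bar == "...%"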
+  par(fig=c(title3.xpos, title3.xpos + title3.width, yt5, yt6), new=TRUE)
+  mtext(paste0(regime.title[2], " Frequency"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, yt3, yt4), new=TRUE)
+  mtext(paste0(regime.title[3], " Frequency"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, yt1, yt2), new=TRUE)
+  mtext(paste0(regime.title[4], " Frequency"), font=2, cex=4)
+
+  ## % on Frequency plots:
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y4+0.425, y4+0.425+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.39, y3+0.39+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y2+0.21, y2+0.21+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.17, y1+0.17+0.01), new=TRUE)
+  mtext("%", cex=3.3)
+
+  ## mean frequency on Frequency plots:
+  if(mean.freq == TRUE){
+    symbol.xpos <- 0.83
+    symbol.width <- 0.1
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y7+0.163, y7+0.163+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))), cex=3.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y5+0.163, y5+0.163+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))), cex=3.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.165, y3+0.165+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))), cex=3.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.163, y1+0.163+0.01), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))), cex=3.5)
+  }
+
+
+  ## Subtitle frequency maps:
+  map.title.xpos <- 0.96
+  map.title.width <- 0.04
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y7-0.012, y7-0.012+0.001), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y5-0.012, y5-0.012+0.001), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y3-0.012, y3-0.012+0.001), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y1-0.012, y1-0.012+0.001), new=TRUE)
+  mtext("year", cex=3)
+
+  if(!as.pdf) dev.off() # close the device to save the 4 png panels
+
+  ## same image formatted for the catalogue:
+  fileoutput2 <- paste0(workdir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png")
+
+  system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -m 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2))
+
+  system(paste0("rm ", fileoutput))
+
+} # close 'p' on 'period'
+
+print("Finished!")
diff --git a/old/weather_regimes_impact_v1.R b/old/weather_regimes_impact_v1.R
new file mode 100644
index 0000000000000000000000000000000000000000..fb912f2b72639715ed3e4f03935a8445bb1cbc0c
--- /dev/null
+++ b/old/weather_regimes_impact_v1.R
@@ -0,0 +1,323 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: Measure the impact of Weather Regimes on a chosen variable, from a reanalysis or from a forecast system.
+# I/O: input are all the various "*_series.RData" and "*_psl.RData" created by the script 'weather_regimes.R';
+#      its outputs are "*_.RData" files, which are needed by the script weather_regimes_maps.R
+# Assumption: all input/output files must be located in the 'workdir' folder
+# Branch: weather_regimes
+
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package because sometimes it gets broken!
+#library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir
+# module load NCO # load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- rean #forecast #rean # set it to 'rean' to load pressure fields and var from a reanalysis, or to 'forecast' to load them from forecast systems
+
+var.num <- 1 # Choose a variable.
1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Only for Reanalysis: +period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 3 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 +#if(fields.name == forecast.name) WR.period = 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+  start.month <- startM
+  WR.period <- start.month
+  if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+  fields <- rean
+  fields.name <- rean.name
+  WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+  fields <- forecast
+  fields.name <- forecast.name
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning, introduced only to be able to run the command above
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+
+# Load var data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+  #  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs
+
+  # remove leap days so that all arrays have the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  vareuFull[,,,,,] <- vareuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    yy <- y - year.start + 1 # index of year y inside the arrays
+    if(n.days.in.a.year(y) == 366) vareuFull[,,yy,60:365,,] <- vareuFull366[,,yy,61:366,,] # keep the March to December period, dropping the 29th of February
+  }
+
+  rm(vareuFull366)
+  gc()
+
+  # convert var to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
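+  # Illustrative sketch (toy data, not used by the script): the block below fits a LOESS curve
+  # (span=0.35) to the raw 365-day climatology of each grid point, and anomalies are then taken
+  # with respect to this smoothed annual cycle; the same idea on a synthetic series:
+  if(FALSE){
+    day      <- 1:365
+    clim.raw <- 10 + 5*sin(2*pi*day/365) + rnorm(365, sd=0.5)   # noisy daily climatology
+    clim.fit <- predict(loess(clim.raw ~ day, data.frame(day, clim.raw), span=0.35))
+    anom     <- clim.raw - clim.fit                             # anomalies w.r.t. the smoothed cycle
+  }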
+  varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  varPeriodClimLoess <- varPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      varPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(varPeriodClim)
+  gc()
+
+  varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  vareuFull <- vareuFull - varPeriodClim2
+  rm(varPeriodClim2)
+  gc()
+
+}
+
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only one month at a time (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  #psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1, grid=my.grid, method='bilinear')$mod # not working
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2)
+
+  years <- c()
+  for(y in year.start:year.end){
+    fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+    char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+    # beware: this way it can only handle leadtimes from 100 to 999, but it is the only way on the SMP machine:
+    #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+    num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+    num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+    num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+    num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+    if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+  }
+
+  n.years.full <- length(years) # years really available
+
+  #Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates=paste0(1982:2013,'0101'), nmember=15, leadtimemax=216, storefreq='daily', output = 'lonlat', grid=my.grid, nprocs=1)
+
+  vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
+
+  # convert var to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
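+  # Illustrative sketch (assumes s2dverification is loaded; toy array, not the script's data):
+  # below, the smoothed climatology is replicated along the extra dimensions with
+  # InsertDim(var, posdim, lendim) so that it can be subtracted from the full data array:
+  if(FALSE){
+    clim <- array(1:6, c(2, 3))     # e.g. a (lat, lon) field
+    rep4 <- InsertDim(clim, 1, 4)   # prepend a dimension of length 4 -> dims (4, 2, 3)
+    dim(rep4)                       # 4 2 3: the same field repeated 4 times
+  }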
+  varPeriodClim <- apply(vareuFull, c(1,4,5,6), mean, na.rm=T)
+  varPeriodClimLoess <- varPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=varPeriodClim[1,,i,j], day=1:n.leadtimes)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      varPeriodClimLoess[1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(varPeriodClim)
+  gc()
+
+  varPeriodClim2 <- InsertDim(InsertDim(varPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  vareuFull <- vareuFull - varPeriodClim2
+  rm(varPeriodClim2)
+  gc()
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+# impact maps:
+for(p in period){
+  # load regime data for the chosen month (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+  if(fields.name == rean.name) {
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    # days belonging to each regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+    varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+    varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+    varPeriodAnom <- varPeriod - varPeriodClim2
+    rm(varPeriod, varPeriodClim, varPeriodClim2)
+    gc()
+
+    #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+    varPeriodAnom1 <- varPeriodAnom[wr1,,]
+    varPeriodAnom2 <- varPeriodAnom[wr2,,]
+    varPeriodAnom3 <- varPeriodAnom[wr3,,]
+    varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+    varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+    varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+    varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+    varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+    varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+    pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth1, varPeriodAnom1)
+    gc()
+
+    varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+    pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
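+    # Illustrative sketch (toy vectors): each pvalueN is a Welch two-sample t-test per grid point,
+    # comparing the anomalies of all days of the period with those of the days of one regime:
+    if(FALSE){
+      all.days <- rnorm(100)               # anomalies of every day of the period
+      wr.days  <- rnorm(30, mean = 0.5)    # anomalies of the days assigned to one regime
+      t.test(all.days, wr.days)$p.value    # small p-value => significant regime impact
+    }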
+    rm(varPeriodAnomBoth2, varPeriodAnom2)
+    gc()
+
+    varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+    pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth3, varPeriodAnom3)
+    gc()
+
+    varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+    pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+    rm(varPeriodAnom)
+    gc()
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(lon, lon.max, lon.min, lat, lat.max, lat.min, psl, psl.name, my.period, p, workdir, rean.name, fields.name, var.num, var.name, var.name.full, var.unit, year.start, year.end, WR.period, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean, varPeriodAnom4mean, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+
+  }
+
+  if(fields.name == forecast.name){
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+
+
+  }
+
+
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close 'p' on 'period'
diff --git a/old/weather_regimes_impact_v1.R~ b/old/weather_regimes_impact_v1.R~
new file mode 100644
index 0000000000000000000000000000000000000000..7dace6e1df24ab86f6a9536b255d31cfdfb4bc45
--- /dev/null
+++ b/old/weather_regimes_impact_v1.R~
@@ -0,0 +1,251 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: Measure the impact of Weather Regimes on a chosen variable, from a reanalysis or from a forecast system.
+# I/O: input are all the various "*_series.RData" and "*_psl.RData" created by the script 'weather_regimes.R';
+#      its outputs are "*_.RData" files, which are needed by the script weather_regimes_maps.R
+# Assumption: all input/output files must be located in the 'workdir' folder
+# Branch: weather_regimes
+
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package because sometimes it gets broken!
+#library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Only for Reanalysis: +period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 3 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two variables below: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) >= 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} 
else { + start.month <- startM + lead.month <- leadM + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1) WR.period <- script.arg[1] + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +} + + + +# Load var data (interpolating it to the same reanalysis grid, if necessary): +cat("Loading data. Please wait...\n") + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + + +if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + #psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1, grid=my.grid, method='bilinear')$mod # not working + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2) + + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | 
cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + } + + n.years <- length(years) + + #Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates=paste0(1982:2013,'0101'), nmember=15, leadtimemax=216, storefreq='daily', output = 'lonlat', grid=my.grid, nprocs=1) + + + psleuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + + + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +# impact maps: +for(p in period){ + # load regime data for chosen month (for forecasts, it load only the data correpsonding to the chosen start month p and lead month 'lead.month':: + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + cluster.sequence <- my.cluster[[p]]$cluster + + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- 
apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(lon, lon.max, lon.min, lat, lat.max, lat.min, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,WR.period, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name,".RData")) + + } + + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + + + + } + + + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + + diff --git a/old/weather_regimes_impact_v2.R b/old/weather_regimes_impact_v2.R new file mode 100644 index 0000000000000000000000000000000000000000..bdb4033250921c2553137f5d8cddfdd2879efc79 --- /dev/null +++ b/old/weather_regimes_impact_v2.R @@ -0,0 +1,346 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: Measure the impact of Weather Regimes of a chosen variable, from a reanalysis of from a forecast systems. +# I/O: input are all the various "*_psl.RData" created by the script 'weather_regimes.R' +# its output are "*_.RData" files which are need by the script weather_regimes_maps.R +# Assumption: all input/output files are located in the 'workdir' folder. Note that this script should not be run with many parallel jobs since it spends most of its time +# in loading data, so it's better to take advantage of the parallel loading feature of Load() to run the script once computing the monthly analysis in a sequential way +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(abind) +library(reshape2) # after each undate of s2dverification, it's better to remove and install this package again because sometimes it gets broken! 
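+# e.g., if reshape2 does get broken after an update, a clean reinstall from an interactive R
+# session usually fixes it (standard commands, shown here only as a reminder):
+#   remove.packages("reshape2"); install.packages("reshape2")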
+#library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir
+# module load NCO # load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast #rean # set it to 'rean' to load pressure fields and var from a reanalysis, or to 'forecast' to load them from forecast systems
+year.start <- 1981 #1982 #1981 #1994 #1979 #1981 # specify the first year of var data (must be the same as set in weather_regimes_vXX.R)
+year.end <- 2015 #2013 #2010 # specify the last year of var data (must be the same as set in weather_regimes_vXX.R)
+
+var.num <- 2 # Choose a variable.
1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn # For forecasts, it is the start month + +# Only for forecasts: +#start.month <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +#lead.month <- 3 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) + +#if(fields.name == forecast.name) WR.period = 1 + +## # in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +## # in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+## # in case the script is run with no arguments, the values of the variables inside the script are used: +## script.arg <- as.integer(commandArgs(TRUE)) + +## if(length(script.arg) == 0 && fields.name == forecast.name){ +## start.month <- startM +## WR.period <- start.month +## lead.month <- leadM +## } + +## # in case the script is run with 1 argument, it is assumed you are using a reanalysis: +## if(length(script.arg) == 1){ +## fields <- rean +## fields.name <- rean.name +## WR.period <- script.arg[1] +## } + +## if(length(script.arg) >= 2){ +## fields <- forecast +## fields.name <- forecast.name +## start.month <- script.arg[1] +## lead.month <- script.arg[2] +## WR.period <- start.month +## } + +#if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: +# source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function +# workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +#} + +#if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +#cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +## days.period <- n.days.period <- period.length <- list() +## for (pp in 1:17){ +## days.period[[pp]] <- NA +## for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +## days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +## # number of days belonging to that period from year.start to year.end: +## n.days.period[[pp]] <- length(days.period[[pp]]) +## # Number of days belonging to that period in a single year: +## period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +## } + + +# Load var data (interpolating it to the same reanalysis grid, if necessary): +cat("Loading data. Please wait...\n") + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + info.period <- WR.period[1] # period used to get the variables n.years, lat.min, lat.max, lon.min y lon.max + load(file=paste0(workdir,"/",fields.name,"_",my.period[info.period],"_psl.RData")) + + vareuFull <-array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon)) + + #for (y in year.start:year.end){ + # var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + # latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + # vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + # rm(var) + # gc() + #} + + vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs + + # remove bisestile days to have all arrays with the same dimensions: + cat("Removing bisestile days. 
+  vareuFull[,,,,,] <- vareuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    yy <- y - year.start + 1 # index of year y inside the arrays
+    if(n.days.in.a.year(y) == 366) vareuFull[,,yy,60:365,,] <- vareuFull366[,,yy,61:366,,] # keep the March to December period, dropping the 29th of February
+  }
+
+  rm(vareuFull366)
+  gc()
+
+  # convert var to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  varPeriodClimLoess <- varPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      varPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(varPeriodClim)
+  gc()
+
+  varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  vareuFull <- vareuFull - varPeriodClim2
+  rm(varPeriodClim2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_S4.name){ # for forecasts, we can load the data for only one month at a time (since each month needs ~20 GB of memory to load all its leadtimes)
+  for(p in WR.period){
+
+    start.month <- p
+    if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+    fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+    fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2)
+
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+
+      # beware: this way it can only handle leadtimes from 100 to 999, but it is the only way on the SMP machine:
+      #char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i $ee ' dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+
+
+      #num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lat1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'latitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+      num.lat2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lat dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+      num.lat <- ifelse(length(num.lat2) == 0, num.lat1, num.lat2)
+
+      #num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+      num.lon1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'longitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+      num.lon2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lon dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+      num.lon <- ifelse(length(num.lon2) == 0, num.lon1, num.lon2)
+
+      if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+    }
+
+    n.years.full <- length(years) # years really available for that month
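+    # Illustrative sketch (toy values): the sdates passed to Load() below are 'YYYYMMDD' strings
+    # built from the available years and the zero-padded start month:
+    if(FALSE){
+      years <- 1981:1983; start.month.char <- "09"
+      paste0(years, start.month.char, "01")   # "19810901" "19820901" "19830901"
+    }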
+
+    #Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates=paste0(1982:2013,'0101'), nmember=15, leadtimemax=216, storefreq='daily', output = 'lonlat', grid=my.grid, nprocs=1)
+
+    vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+
+    # convert var to daily anomalies with the LOESS filter:
+    cat("Calculating anomalies. Please wait...\n")
+    varPeriodClim <- apply(vareuFull, c(1,4,5,6), mean, na.rm=T)
+    varPeriodClimLoess <- varPeriodClim
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=varPeriodClim[1,,i,j], day=1:n.leadtimes)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        varPeriodClimLoess[1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(varPeriodClim)
+    gc()
+
+    varPeriodClim2 <- InsertDim(InsertDim(varPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+    ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+    ## s4.pred <- predict(s4.loess)
+
+    vareuFull <- vareuFull - varPeriodClim2
+    rm(varPeriodClim2)
+    gc()
+  } # close p on period
+} # close if on data type
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+
+#if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+#if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+###### impact maps:
+
+# load regime data for the chosen month (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+if(fields.name == rean.name) {
+  load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+  cluster.sequence <- my.cluster[[p]]$cluster
+
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  varPeriod <- vareuFull[1,1,,pos.period(2001,1),,] # select only var data during the chosen period
+
+  gc()
+  cat("Formatting data. Please wait...\n")
+  var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+  gc()
+  cat("Formatting data. Please wait......\n")
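+  # Illustrative sketch (toy array; reshape2 is already attached above): melt() + acast() below
+  # collapse the (Year, Day) dimensions into a single day index, turning dims
+  # (year, day, lat, lon) into (year*day, lat, lon):
+  if(FALSE){
+    a <- array(rnorm(2*3*4*5), c(2, 3, 4, 5))              # (Year, Day, Lat, Lon)
+    m <- melt(a, varnames = c("Year", "Day", "Lat", "Lon"))
+    b <- unname(acast(m, Year + Day ~ Lat ~ Lon))
+    dim(b)                                                 # 6 4 5
+  }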
+  varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon))
+
+  varwr1 <- varmat[wr1,,,drop=F]
+  varwr2 <- varmat[wr2,,,drop=F]
+  varwr3 <- varmat[wr3,,,drop=F]
+  varwr4 <- varmat[wr4,,,drop=F]
+
+  #varwrmean <- apply(varwr,c(2,3),mean,na.rm=T)
+  varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T)
+  varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T)
+  varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T)
+  varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T)
+
+  n.datos <- n.years * n.days.in.a.period(1,2001)
+
+  varwrBoth1 <- abind(varmat, varwr1, along = 1)
+  pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth1, varwr1)
+  gc()
+
+  varwrBoth2 <- abind(varmat, varwr2, along = 1)
+  pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth2, varwr2)
+  gc()
+
+  varwrBoth3 <- abind(varmat, varwr3, along = 1)
+  pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth3, varwr3)
+  gc()
+
+  varwrBoth4 <- abind(varmat, varwr4, along = 1)
+  pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+  rm(varwrBoth4, varwr4)
+
+  rm(varmat)
+  gc()
+
+  # save all the data necessary to redraw the graphs when we know the right regime:
+  save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+  gc()
+
+}
+
+if(fields.name == forecast.name){
+  load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+
+}
+
+}
+
+
+cat("Finished!\n")
+
diff --git a/old/weather_regimes_impact_v2.R~ b/old/weather_regimes_impact_v2.R~
new file mode 100644
index 0000000000000000000000000000000000000000..5b2ef9d5faf2527728d442308058d20eeb8df739
--- /dev/null
+++ b/old/weather_regimes_impact_v2.R~
@@ -0,0 +1,331 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: Measure the impact of Weather Regimes on a chosen variable, from a reanalysis or from a forecast system.
+# I/O: input are all the various "*_psl.RData" created by the script 'weather_regimes.R';
+#      its outputs are "*_.RData" files, which are needed by the script weather_regimes_maps.R
+# Assumption: all input/output files are located in the 'workdir' folder. Note that this script should not be run with many parallel jobs, since it spends most of its time
+#             loading data; it's better to take advantage of the parallel loading feature of Load() and run the script once, computing the monthly analyses sequentially
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package because sometimes it gets broken!
+#library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- rean #forecast #rean # set it to 'rean' to load pressure fields and var from reanalisis, or to 'forecast' to load them from forecast systems + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn # For forecasts, it is the start month + +# Only for forecasts: +#start.month <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +#lead.month <- 3 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) + +#if(fields.name == forecast.name) WR.period = 1 + +## # in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +## # in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the 
+## # in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering;
+## # in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth.
+## # in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE)) # must stay active: the if() blocks below test length(script.arg)
+
+## if(length(script.arg) == 0 && fields.name == forecast.name){
+##   start.month <- startM
+##   WR.period <- start.month
+##   lead.month <- leadM
+## }
+
+## # in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+## if(length(script.arg) == 1){
+##   fields <- rean
+##   fields.name <- rean.name
+##   WR.period <- script.arg[1]
+## }
+
+## if(length(script.arg) >= 2){
+##   fields <- forecast
+##   fields.name <- forecast.name
+##   start.month <- script.arg[1]
+##   lead.month <- script.arg[2]
+##   WR.period <- start.month
+## }
+
+if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+## days.period <- n.days.period <- period.length <- list()
+## for (pp in 1:17){
+##   days.period[[pp]] <- NA
+##   for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+##   days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+##   # number of days belonging to that period from year.start to year.end:
+##   n.days.period[[pp]] <- length(days.period[[pp]])
+##   # Number of days belonging to that period in a single year:
+##   period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in winter season length because of leap years
+## }
+
+# Load var data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+  info.period <- WR.period[1] # period used to get the variables n.years, lat.min, lat.max, lon.min and lon.max
+  load(file=paste0(workdir,"/",fields.name,"_",my.period[info.period],"_psl.RData"))
+
+  vareuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+  #  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+
+  # remove leap days (the 29th of February) so that all years have the same 365-day length:
+  cat("Removing leap days. Please wait...\n")
+  vareuFull[,,,,,] <- vareuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    yi <- y - year.start + 1 # note: use a separate index; overwriting y here would make n.days.in.a.year() test the index instead of the year
+    if(n.days.in.a.year(y) == 366) vareuFull[,,yi,60:365,,] <- vareuFull366[,,yi,61:366,,] # take the March-to-December period, skipping the 29th of February
+  }
+
+  rm(vareuFull366)
+  gc()
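+  # A minimal, self-contained illustration of the shift above (kept commented out so it
+  # never runs): days 61:366 of a leap year are moved into slots 60:365, dropping day 60
+  # (the 29th of February).
+  # leap <- 1:366                      # one leap year, labelled by day of year
+  # no.leap <- leap[1:365]             # start from the first 365 days
+  # no.leap[60:365] <- leap[61:366]    # overwrite March-December, skipping day 60
+  # identical(no.leap, (1:366)[-60])   # TRUE: the 29th of February is gone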
+  # convert psl in daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  varPeriodClimLoess <- varPeriodClim
+  for(i in 1:n.pos.lat){ # note: loop over 1:n.pos.lat; 'for(i in n.pos.lat)' would smooth only the last latitude
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      varPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(varPeriodClim)
+  gc()
+
+  varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  vareuFull <- vareuFull - varPeriodClim2
+  rm(varPeriodClim2)
+  gc()
+
+}
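+# What the LOESS step does, on a toy series (kept commented out): the multi-year daily
+# mean is noisy, and a local regression over the 365 calendar days (span=0.35) extracts
+# the smooth annual cycle that is then subtracted to obtain daily anomalies.
+# day <- 1:365
+# clim.raw <- 10 + 8*sin(2*pi*day/365) + rnorm(365)         # noisy daily climatology
+# clim.smooth <- predict(loess(clim.raw ~ day, span=0.35))  # smoothed annual cycle
+# anomaly <- clim.raw - clim.smooth                         # what remains is noise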
+for(p in WR.period){
+
+  if(fields.name == ECMWF_S4.name){ # for forecasts, we can load only the data for 1 month at a time (since each month needs ~20 GB of memory to load all its leadtimes)
+    start.month <- p
+    start.month.char <- sprintf("%02d", start.month) # zero-padded month, e.g. "03"
+
+    #psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1, grid=my.grid, method='bilinear')$mod # not working
+
+    fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+    fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2)
+
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+    }
+
+    n.years.full <- length(years) # years really available
+
+    #Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates=paste0(1982:2013,'0101'), nmember=15, leadtimemax=216, storefreq='daily', output = 'lonlat', grid=my.grid, nprocs=1)
+
+    vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+
+    # convert var in daily anomalies with the LOESS filter:
+    cat("Calculating anomalies. Please wait...\n")
+    varPeriodClim <- apply(vareuFull, c(1,4,5,6), mean, na.rm=T)
+    varPeriodClimLoess <- varPeriodClim
+
+    n.pos.lat <- unname(dim(varPeriodClim)[3]) # in the forecast case these are not read from the psl RData, so take them from the loaded data
+    n.pos.lon <- unname(dim(varPeriodClim)[4])
+
+    for(i in 1:n.pos.lat){ # as above: 1:n.pos.lat, not n.pos.lat alone
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=varPeriodClim[1,,i,j], day=1:n.leadtimes)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        varPeriodClimLoess[1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(varPeriodClim)
+    gc()
+
+    varPeriodClim2 <- InsertDim(InsertDim(varPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+    ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+    ## s4.pred <- predict(s4.loess)
+
+    vareuFull <- vareuFull - varPeriodClim2
+    rm(varPeriodClim2)
+    gc()
+  }
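+  # InsertDim() from s2dverification replicates an array along a new dimension, so the
+  # smoothed climatology gains member and start-date dimensions and can be subtracted
+  # from vareuFull directly. A toy dimension check (kept commented out):
+  # x <- array(1:4, c(2,2))
+  # y <- InsertDim(x, 1, 3)   # prepend a new dimension of length 3
+  # dim(y)                    # 3 2 2
+  # all(y[2,,] == x)          # TRUE: every slice along the new dimension is a copy of x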
+  if(fields.name == ECMWF_monthly.name){
+    # Load 6-hourly psl data in the forecast case:
+    n.leadtimes <- length(leadtime)
+    sdates <- weekly.seq(forecast.year,mes,day)
+    n.sdates <- length(sdates)
+
+    # note: the three lines above must come first, because the array below needs n.sdates and n.leadtimes
+    psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+    for (startdate in 1:n.sdates){
+      for(lday in leadtime){
+        var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+        mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+        rm(var)
+        gc()
+
+        psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+        rm(mean.lday)
+        gc()
+      }
+    }
+  }
+
+  #if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+  #if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+  gc()
+
+  ###### impact maps:
+
+  # load the regime data for the chosen month (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+  if(fields.name == rean.name) {
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    varPeriod <- vareuFull[1,1,,pos.period(2001,p),,] # select only the var data during the chosen period p (not period 1, or every month would reuse the January days)
+
+    gc()
+    cat("Formatting data. Please wait...\n")
+    var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+    cat("Formatting data. Please wait......\n")
+    varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon))
+
+    varwr1 <- varmat[wr1,,,drop=F]
+    varwr2 <- varmat[wr2,,,drop=F]
+    varwr3 <- varmat[wr3,,,drop=F]
+    varwr4 <- varmat[wr4,,,drop=F]
+
+    #varwrmean <- apply(varwr,c(2,3),mean,na.rm=T)
+    varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T)
+    varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T)
+    varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T)
+    varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T)
+
+    n.datos <- n.years * n.days.in.a.period(p,2001) # number of days in the full sample (all days of period p over all years)
+
+    varwrBoth1 <- abind(varmat, varwr1, along = 1)
+    pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth1, varwr1)
+    gc()
+
+    varwrBoth2 <- abind(varmat, varwr2, along = 1)
+    pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth2, varwr2)
+    gc()
+
+    varwrBoth3 <- abind(varmat, varwr3, along = 1)
+    pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth3, varwr3)
+    gc()
+
+    varwrBoth4 <- abind(varmat, varwr4, along = 1)
+    pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth4, varwr4)
+
+    rm(varmat)
+    gc()
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+    rm(pvalue1,pvalue2,pvalue3,pvalue4)
+    rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+    gc()
+
+  }
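+  # The maps above are composite means plus a per-gridpoint significance test: average
+  # var over the days assigned to one regime, then compare that subsample against all
+  # days with a two-sided t-test. The same logic on toy data (kept commented out):
+  # set.seed(1)
+  # varmat.toy <- array(rnorm(100*4*5), c(100,4,5))   # 100 days x 4 lat x 5 lon
+  # cluster.toy <- sample(1:4, 100, replace=TRUE)     # one regime label per day
+  # wr.days <- which(cluster.toy == 1)
+  # wr.mean <- apply(varmat.toy[wr.days,,,drop=FALSE], c(2,3), mean)             # composite
+  # pval <- apply(varmat.toy, c(2,3), function(x) t.test(x, x[wr.days])$p.value) # significance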
+  if(fields.name == forecast.name){
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+  }
+
+}
+
+cat("Finished!\n")
+
diff --git a/old/weather_regimes_impact_v3.R b/old/weather_regimes_impact_v3.R
new file mode 100644
index 0000000000000000000000000000000000000000..69ff5661b7851a0e1f1c62772a42c9920ce4389a
--- /dev/null
+++ b/old/weather_regimes_impact_v3.R
@@ -0,0 +1,540 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: measure the impact of the Weather Regimes on a chosen variable, from a reanalysis or from a forecast system.
+#
+# I/O: its input are all the various "*_psl.RData" files created by the script 'weather_regimes.R';
+# its output are "*_.RData" files which are needed by the script weather_regimes_maps.R
+# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_impact.R
+#
+# Assumption: all input/output files are located in the 'workdir' folder. Note that this script should not be run with many parallel jobs since it spends most of its time
+# in loading data, so it's better to take advantage of the parallel loading feature of Load() and run the script once, computing the monthly analysis sequentially.
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken!
+#library(TTR)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+#workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir
+# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+
+forecast.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+# available reanalyses for var data:
+#ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- JRA55 #JRA55 #ERAint #NCEP # choose one of the three reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+#ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4 <- '/esarchive/old-files/exp_ecmwf_system4_m1/$STORE_FREQ$_mean/old/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' # only for daily tas S4
+
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+lat.name <- "latitude"
+lon.name <- "longitude"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+# set it to 'rean' to load pressure fields and var from a reanalysis, or to 'forecast' to load them from forecast systems:
+fields <- rean #forecast #rean
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn # For forecasts, it is the start month
+
+# the following 8 variables have to be the same as in the weather_regimes_vXX.R script that processed the psl data:
+year.start <- 1981 #1979 #1981 #1982 #1981 # specify the first year of var data
+year.end <- 2016 #2013 #2015 #2013 #2010 # specify the last year of var data
+
+# Only for seasonal forecasts:
+startM <- 3 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 6 # select only the data of one lead month: [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast var data, so it will load only the available years and deal with NAs
+res <- 0.75 # set the resolution you want the var data interpolated to when loading it (e.g. set to 0.75 to use the same res as ERA-Interim)
+            # interpolation method is BILINEAR.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name)))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+
+# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering;
+# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth.
+# in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+# in case the script is run with no arguments:
+if(length(script.arg) == 0 && fields.name == forecast.name){
+  start.month <- startM
+  #WR.period <- start.month
+  lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis, so the variable to change is the period:
+if(length(script.arg) == 1){
+  fields <- rean
+  fields.name <- rean.name
+  WR.period <- script.arg[1]
+}
+
+# in case the script is run with 2 arguments, it is assumed you are using forecasts, so the variables to change are the start date and the lead time:
+if(length(script.arg) >= 2){
+  fields <- forecast
+  fields.name <- forecast.name
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month # it becomes just a copy of the start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function
+  #workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
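+# Usage examples from the terminal, following the rules coded above:
+# Rscript ~/scripts/weather_regimes_impact.R          # no arguments: reanalysis, all periods in WR.period
+# Rscript ~/scripts/weather_regimes_impact.R 7        # one argument: reanalysis, July only
+# Rscript ~/scripts/weather_regimes_impact.R 3 6      # two arguments: forecast, start month March, lead month 6
+# Rscript ~/scripts/weather_regimes_impact.R 3 6 1    # any third argument also switches to the SMP paths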
+#if(fields.name == forecast.name) WR.period = 1
+
+#if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below:
+#  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function
+#  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+#}
+
+#if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+#cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+
+## days.period <- n.days.period <- period.length <- list()
+## for (pp in 1:17){
+##   days.period[[pp]] <- NA
+##   for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+##   days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+##   # number of days belonging to that period from year.start to year.end:
+##   n.days.period[[pp]] <- length(days.period[[pp]])
+##   # Number of days belonging to that period in a single year:
+##   period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in winter season length because of leap years
+## }
+
+# Load var data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+  info.period <- WR.period[1] # period used to get the variables n.years, lat.min, lat.max, lon.min and lon.max
+  WR.period.old <- WR.period
+
+  load(file=paste0(rean.dir,"/",fields.name,"_",my.period[info.period],"_psl.RData"))
+  if(!identical(WR.period, WR.period.old)) WR.period <- WR.period.old # the load() above can overwrite WR.period, so restore it
+
+  n.years <- year.end - year.start + 1
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+  #  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  vareuFull <- array(NA,c(1,1,n.years,365, dim(vareuFull366)[5], dim(vareuFull366)[6]))
+
+  # remove leap days (the 29th of February) so that all years have the same 365-day length:
+  cat("Removing leap days. Please wait...\n")
+  vareuFull[,,,,,] <- vareuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    yi <- y - year.start + 1 # as in the script above: keep the year y and the array index yi separate
+    if(n.days.in.a.year(y) == 366) vareuFull[,,yi,60:365,,] <- vareuFull366[,,yi,61:366,,] # take the March-to-December period, skipping the 29th of February
+  }
+
+  rm(vareuFull366)
+  gc()
+
+  # convert var in daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  varPeriodClimLoess <- varPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      varPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(varPeriodClim)
+  gc()
+
+  varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  vareuFull <- vareuFull - varPeriodClim2
+  #rm(varPeriodClim2) # varPeriodClim2 is still needed below to compute the relative anomalies
+  gc()
+
+  for(p in WR.period){
+    #p=1 # for the debug
+
+    ## load regime data for the chosen month:
+    varPeriod <- vareuFull[1,1,,pos.period(1,p),,] # select only the var data during the chosen period
+    varPeriodClim <- varPeriodClim2[1,1,,pos.period(1,p),,]
+    varPeriodRel <- varPeriod / varPeriodClim # relative anomalies (fraction of the climatological value)
+
+    load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    seasonal.data <- ifelse(length(my.cluster$cluster)/n.years > 33, TRUE, FALSE) # if there are more than 33 days per year, we are loading sequences of 3 months
+
+    if(seasonal.data == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+      n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+
+    } else {
+      cluster.sequence <- my.cluster$cluster # old syntax: my.cluster[[p]]$cluster
+    }
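+    # How the index extraction above works, on toy numbers (kept commented out): with a
+    # 90-day extended season per year and a target month spanning positions 31:59 of it,
+    # the same offsets recur every 90 days, once per year.
+    # n.days.period.toy <- 90; days.month.toy <- 31:59
+    # days.month.full.toy <- c(sapply(1:3, function(y) days.month.toy + n.days.period.toy*(y-1)))
+    # days.month.full.toy[1:5]    # 31 32 33 34 35
+    # length(days.month.full.toy) # 87 = 29 days x 3 years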
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    gc()
+
+    cat("Formatting data. Please wait...\n")
+    var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    var.meltedRel <- melt(varPeriodRel[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+
+    cat("Formatting data. Please wait......\n")
+    varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon))
+    varmatRel <- unname(acast(var.meltedRel, Year + Day ~ Lat ~ Lon))
+
+    varwr1 <- varmat[wr1,,,drop=F]
+    varwr2 <- varmat[wr2,,,drop=F]
+    varwr3 <- varmat[wr3,,,drop=F]
+    varwr4 <- varmat[wr4,,,drop=F]
+
+    varwr1Rel <- varmatRel[wr1,,,drop=F]
+    varwr2Rel <- varmatRel[wr2,,,drop=F]
+    varwr3Rel <- varmatRel[wr3,,,drop=F]
+    varwr4Rel <- varmatRel[wr4,,,drop=F]
+
+    varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T)
+    varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T)
+    varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T)
+    varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T)
+
+    varwr1meanRel <- apply(varwr1Rel,c(2,3),mean,na.rm=T) # note: computed from the relative composites varwrXRel, not from varwrX
+    varwr2meanRel <- apply(varwr2Rel,c(2,3),mean,na.rm=T)
+    varwr3meanRel <- apply(varwr3Rel,c(2,3),mean,na.rm=T)
+    varwr4meanRel <- apply(varwr4Rel,c(2,3),mean,na.rm=T)
+
+    n.datos <- n.years * n.days.in.a.period(p,2001)
+
+    varwrBoth1 <- abind(varmat, varwr1, along = 1)
+    pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth1, varwr1)
+    gc()
+
+    varwrBoth2 <- abind(varmat, varwr2, along = 1)
+    pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth2, varwr2)
+    gc()
+
+    varwrBoth3 <- abind(varmat, varwr3, along = 1)
+    pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth3, varwr3)
+    gc()
+
+    varwrBoth4 <- abind(varmat, varwr4, along = 1)
+    pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth4, varwr4)
+
+    rm(varmat, varmatRel)
+    gc()
+
+    #EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+    #my.brks.var <- seq(-3,3,0.5)
+    #my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+    #PlotEquiMap2(rescale(varwr3mean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, continents.col="black")
+
+    # save all the data necessary to redraw the graphs when we know the right regime (including the relative-anomaly composites):
+    save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, varwr1meanRel, varwr2meanRel, varwr3meanRel, varwr4meanRel, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+    rm(pvalue1,pvalue2,pvalue3,pvalue4)
+    rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+    rm(varwr1meanRel,varwr2meanRel,varwr3meanRel,varwr4meanRel)
+    gc()
+
+  } # close p on WR.period
+
+}
+
+
+if(fields.name == ECMWF_S4.name){ # for forecasts, we can load only the data for 1 month at a time (since each month needs ~20 GB of memory to load all its leadtimes)
+  for(p in WR.period){
+    #p=1 # for the debug
+
+    start.month <- p
+    start.month.char <- sprintf("%02d", start.month) # zero-padded month, e.g. "03"
+
+    fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+    fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2)
+
+    years <- c()
+
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      if(file.exists(fields4)){
+        # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine:
+        #char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+        #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+        num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'time dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) # doesn't work well on the SMP
+        #num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+
+        if(fields == forecast && forecast == ECMWF_S4 && var.name[var.num] == "sfcWind") { # the sfcWind daily data of S4 has a different formatting:
+          num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ',' -f1"), intern=T))
+        } else {
+          num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        }
+
+        #num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+        num.lat1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'latitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lat2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lat dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lat <- ifelse(length(num.lat2) == 0, num.lat1, num.lat2)
+
+        #num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+        num.lon1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'longitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lon2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lon dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lon <- ifelse(length(num.lon2) == 0, num.lon1, num.lon2)
+
+        num.var <- system(paste0("ncks -m ",fields4,"| grep -E -i '", var.name[var.num], "'"), intern=T)
+
+        if(length(num.lead) != 0 && length(num.memb) != 0 && length(num.lat) != 0 && length(num.lon) != 0 && length(num.var) != 0){
+          if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+        }
+      }
+    }
+
+    n.years.full <- length(years) # years really available for that month
+    print(paste0(n.years.full, " available years for ", my.period[p]))
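+    # A sturdier alternative to parsing the 'ncks -m' output with grep/cut would be to read
+    # the dimension lengths with the ncdf4 package (a sketch, kept commented out to avoid the
+    # extra dependency; it assumes the leadtime dimension is called 'time', as the grep above does):
+    # library(ncdf4)
+    # nc <- nc_open(fields4)
+    # num.lead.alt <- nc$dim[["time"]]$len
+    # num.memb.alt <- nc$dim[[member.name]]$len
+    # nc_close(nc)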
+
+    vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(lon=lon.name, lat=lat.name, member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
+
+    # convert var in daily anomalies with the LOESS filter:
+    cat("Calculating anomalies. Please wait...\n")
+    varPeriodClim <- apply(vareuFull, c(1,4,5,6), mean, na.rm=T)
+    varPeriodClimLoess <- varPeriodClim
+
+    n.pos.lat <- unname(dim(varPeriodClim)[3])
+    n.pos.lon <- unname(dim(varPeriodClim)[4])
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=varPeriodClim[1,,i,j], day=1:n.leadtimes)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        varPeriodClimLoess[1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(varPeriodClim)
+    gc()
+
+    varPeriodClim2 <- InsertDim(InsertDim(varPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+    ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+    ## s4.pred <- predict(s4.loess)
+
+    vareuFull <- vareuFull - varPeriodClim2
+    rm(varPeriodClim2)
+    gc()
+
+    #if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+    #if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+    gc()
+
+    ###### impact maps:
+
+    load.month <- p + lead.month
+    if(load.month > 12) load.month <- load.month - 12
+
+    # select only the var data during the chosen leadtime, removing leap days:
+    varPeriod <- array(NA,c(dim(vareuFull)[1:3],length(pos.period(2001,load.month)),dim(vareuFull)[5:6]))
+
+    i <- 1
+    for(y in years){
+      #y.mod <- y
+      #if((p + lead.month) > 12) y.mod <- y + 1
+      #pos.per <- pos.period(y.mod, load.month) - ifelse(p == 1, 0, length(c(pos.months.before(y, p-1), pos.period(y, p-1))))
+
+      # leadtime positions of the target month: skip the lead.month months after the start month p,
+      # then take the days of the following month (equivalent to the old chain of if(lead.month == 0..6) lines):
+      offset <- 0
+      if(lead.month > 0) offset <- sum(sapply(0:(lead.month-1), function(k) ndm(p + k, y)))
+      pos.per <- offset + 1:ndm(p + lead.month, y)
+
+      if(length(pos.per) == 29) {
+        #febmod = length(pos.period(y.mod,load.month))
+        varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per[-29],,] # a 29-day February: drop the 29th
+      } else {
+        varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per,,]
+      }
+
+      i <- i+1
+    }
+
+    gc()
+
+    years.var <- years # keep a copy: the load() below overwrites 'years' with the psl ones
+    load(file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # load the cluster sequence for a chosen startdate and leadtime
+    #cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) # no leap days are left, only the years can be different!
+    if(length(years.var) < length(years)) {
+      print("Warning: there are more psl years than var years. Removing the years not in common...")
+      ss <- which(is.na(match(years,years.var))) # remove the psl years not present in the var data
+      my.cluster.array2 <- my.cluster.array[[p]][,-ss,] # (no leap days are left in my.cluster.array)
+    } else {
+      my.cluster.array2 <- my.cluster.array[[p]] # (no leap days are left in my.cluster.array)
+    }
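+    # which(..., arr.ind=TRUE) below returns one row of (day, year, member) indices per
+    # matching element of my.cluster.array2, which is what the copy loops need. A toy
+    # check (kept commented out):
+    # m <- array(c(1,2,1,2), c(2,2))
+    # which(m == 1, arr.ind=TRUE)   # two rows of (row, col) indices: (1,1) and (1,2)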
+
+    wr1 <- which(my.cluster.array2 == 1, arr.ind=T)
+    wr2 <- which(my.cluster.array2 == 2, arr.ind=T)
+    wr3 <- which(my.cluster.array2 == 3, arr.ind=T)
+    wr4 <- which(my.cluster.array2 == 4, arr.ind=T)
+
+    var1 <- var2 <- var3 <- var4 <- array(NA,c(dim(varPeriod)))
+
+    for (i in 1:dim(wr1)[1]) var1[1, wr1[i,3], wr1[i,2], wr1[i,1],,] <- varPeriod[1, wr1[i,3], wr1[i,2], wr1[i,1],,]
+    for (i in 1:dim(wr2)[1]) var2[1, wr2[i,3], wr2[i,2], wr2[i,1],,] <- varPeriod[1, wr2[i,3], wr2[i,2], wr2[i,1],,]
+    for (i in 1:dim(wr3)[1]) var3[1, wr3[i,3], wr3[i,2], wr3[i,1],,] <- varPeriod[1, wr3[i,3], wr3[i,2], wr3[i,1],,]
+    for (i in 1:dim(wr4)[1]) var4[1, wr4[i,3], wr4[i,2], wr4[i,1],,] <- varPeriod[1, wr4[i,3], wr4[i,2], wr4[i,1],,]
+
+    varwr1mean <- apply(var1, c(5,6), mean, na.rm=T)
+    varwr2mean <- apply(var2, c(5,6), mean, na.rm=T)
+    varwr3mean <- apply(var3, c(5,6), mean, na.rm=T)
+    varwr4mean <- apply(var4, c(5,6), mean, na.rm=T)
+    gc()
+
+    n.datos <- n.years.full * n.days.in.a.period(p,2001)
+
+    varwrBoth1 <- abind(varPeriod, var1, along = 1)
+    pvalue1 <- apply(varwrBoth1, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth1)
+    gc()
+
+    varwrBoth2 <- abind(varPeriod, var2, along = 1)
+    pvalue2 <- apply(varwrBoth2, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth2)
+    gc()
+
+    varwrBoth3 <- abind(varPeriod, var3, along = 1)
+    pvalue3 <- apply(varwrBoth3, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth3)
+    gc()
+
+    varwrBoth4 <- abind(varPeriod, var4, along = 1)
+    pvalue4 <- apply(varwrBoth4, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth4)
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))
+
+    rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+    gc()
+
+  } # close p on period
+} # close if on data type
+
+
+
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  # note: the three lines above must come first, because the array below needs n.sdates and n.leadtimes
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+
+
+cat("Finished!\n")
+#library(TTR) +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions +#workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#ean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" + +forecast.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +# available reanalysis for var data: +#ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +NCEP.name <- "NCEP" + +rean <- JRA55 #JRA55 #ERAint #NCEP # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +#ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4 <- '/esarchive/old-files/exp_ecmwf_system4_m1/$STORE_FREQ$_mean/old/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' # only for daily tas S4 + +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 +lat.name <- "latitude" +lon.name <- "longitude" + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +# set it to 'rean' to load pressure fields and var from reanalisis, or to 'forecast' to load them from forecast systems: +fields <- rean #forecast #rean + +var.num <- 1 # Choose a variable. 
1: sfcWind 2: tas +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("wind Speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn # For forecasts, it is the start month + +# the following 8 variables have to be the same as in the weather_regimes_vXX.R script that processed the psl data: +year.start <- 1981 #1979 #1981 #1982 #1981 # specify the first year of var data +year.end <- 2016 #2013 #2015 #2013 #2010 # specify the last year of var data + +# Only for seasonal forecasts: +startM <- 3 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 6 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +missing.forecasts=FALSE # set it to TRUE if there can be missing hindcasts files in the forecast var data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) + +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name))) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +# in case the script is run with no arguments: +if(length(script.arg) == 0 && fields.name == forecast.name){ + start.month <- startM + #WR.period <- start.month + lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis, so the variable to change is the period: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +# in case the script is run with 2 arguments, it is assumed you are using forecasts, so the variables to change are the start date and the lead time: +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month # it becomes just a copy of the start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + #workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} + +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +#if(fields.name == forecast.name) WR.period = 1 + +#if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: +# source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function +# workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +#} + +#if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +#cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) + +## days.period <- n.days.period <- period.length <- list() +## for (pp in 1:17){ +## days.period[[pp]] <- NA +## for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +## days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +## # number of days belonging to that period from year.start to year.end: +## n.days.period[[pp]] <- length(days.period[[pp]]) +## # Number of days belonging to that period in a single year: +## period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +## } + + +# Load var data (interpolating it to the same reanalysis grid, if necessary): +cat("Loading data. 
Please wait...\n") + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + info.period <- WR.period[1] # period used to get the variables n.years, lat.min, lat.max, lon.min y lon.max + WR.period.old <- WR.period + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[info.period],"_psl.RData")) + if(!identical(WR.period, WR.period.old)) WR.period <- WR.period.old + + n.years <- year.end - year.start + 1 + + #for (y in year.start:year.end){ + # var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + # latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + # vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + # rm(var) + # gc() + #} + + vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + vareuFull <-array(NA,c(1,1,n.years,365, dim(vareuFull366)[5], dim(vareuFull366)[6])) + + # remove bisestile days to have all arrays with the same dimensions: + cat("Removing bisestile days. Please wait...\n") + vareuFull[,,,,,] <- vareuFull366[,,,1:365,,] + + for(y in year.start:year.end){ + y <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) vareuFull[,,y,60:365,,] <- vareuFull366[,,y,61:366,,] # take the march to december period removing the 29th of February + } + + rm(vareuFull366) + gc() + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T) + + varPeriodClimLoess <- varPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + varPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(varPeriodClim) + gc() + + varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + vareuFull <- vareuFull - varPeriodClim2 + rm(varPeriodClim2) + gc() + + for(p in WR.period){ + #p=1 # for the debug + + # load regime data for chosen month: + varPeriod <- vareuFull[1,1,,pos.period(2001,p),,] # select only var data during the chosen period + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + seasonal.data <- ifelse(length(my.cluster$cluster)/n.years > 33, TRUE, FALSE) # if there are more than 33 days, it means that we are loading sequences of 3 months + + if(seasonal.data == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p + n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months + + days.month <- days.month.new <- days.month.full <- c() + days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + n.days.period*(y-1) + days.month.full <- c(days.month.full, days.month.new) + #print(days.month.new) + #print(days.month) + } + + cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p + + } else { + cluster.sequence <- my.cluster$cluster # 
old syntax: my.cluster[[p]]$cluster + } + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + gc() + cat("Formatting data. Please wait...\n") + var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + cat("Formatting data. Please wait......\n") + varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon)) + + varwr1 <- varmat[wr1,,,drop=F] + varwr2 <- varmat[wr2,,,drop=F] + varwr3 <- varmat[wr3,,,drop=F] + varwr4 <- varmat[wr4,,,drop=F] + + #varwrmean <- apply(varwr,c(2,3),mean,na.rm=T) + varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T) + varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T) + varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T) + varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T) + + n.datos <- n.years * n.days.in.a.period(p,2001) + + varwrBoth1 <- abind(varmat, varwr1, along = 1) + pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth1, varwr1) + gc() + + varwrBoth2 <- abind(varmat, varwr2, along = 1) + pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth2, varwr2) + gc() + + varwrBoth3 <- abind(varmat, varwr3, along = 1) + pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth3, varwr3) + gc() + + varwrBoth4 <- abind(varmat, varwr4, along = 1) + pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth4, varwr4) + + rm(varmat) + gc() + + #EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + #my.brks.var <- seq(-3,3,0.5) + #my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #PlotEquiMap2(rescale(varwr3mean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, continents.col="black") + + # save all the data necessary to redraw the graphs when we know the right regime: + save(varwr1mean, varwr2mean, varwr3mean,varwr4mean, pvalue1,pvalue2,pvalue3,pvalue4, file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean) + gc() + + } # close p on WR.period + +} + + + +if(fields.name == ECMWF_S4.name){ # for forecasts, we can load only the data for 1 month at time (since each month needs ~20 GB of memory to load all its leadtimes) + for(p in WR.period){ + #p=1 # for the debug + + start.month <- p + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2) + + years <- c() + + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + if(file.exists(fields4)){ + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) 
+ num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'time dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) #don't work well on the SMP + #num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + + if(fields == forecast && forecast == ECMWF_S4 && var.name[var.num] == "sfcWind") { # sfcWind daily data of S4 has a different formatting: + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ',' -f1"), intern=T)) + } else { + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) } + + #num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lat1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'latitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) + num.lat2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lat dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) + num.lat <- ifelse(length(num.lat2) == 0, num.lat1, num.lat2) + + #num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + num.lon1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'longitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) + num.lon2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lon dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) + num.lon <- ifelse(length(num.lon2) == 0, num.lon1, num.lon2) + + num.var <- system(paste0("ncks -m ",fields4,"| grep -E -i '", var.name[var.num], "'"), intern=T) + + if(length(num.lead) != 0 && length(num.memb) != 0 && length(num.lat) != 0 && length(num.lon) != 0 && length(num.var) != 0){ + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + } + } + + n.years.full <- length(years) # years really available for that month + print(paste0(n.years.full, " available years for ", my.period[p])) + + vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(lon=lon.name, lat=lat.name, member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + # convert var in daily anomalies with the LOESS filter: + cat("Calculating anomalies. 
+ varPeriodClim <- apply(vareuFull, c(1,4,5,6), mean, na.rm=T)
+ varPeriodClimLoess <- varPeriodClim
+
+ n.pos.lat <- unname(dim(varPeriodClim)[3])
+ n.pos.lon <- unname(dim(varPeriodClim)[4])
+
+ # smooth the daily climatology of each grid point with a LOESS fit:
+ for(i in 1:n.pos.lat){
+ for(j in 1:n.pos.lon){
+ my.data <- data.frame(ens.mean=varPeriodClim[1,,i,j], day=1:n.leadtimes)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ varPeriodClimLoess[1,,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(varPeriodClim)
+ gc()
+
+ # replicate the smoothed climatology over years and members to match the dimensions of vareuFull:
+ varPeriodClim2 <- InsertDim(InsertDim(varPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+ ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+ ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+ ## s4.pred <- predict(s4.loess)
+
+ vareuFull <- vareuFull - varPeriodClim2
+ rm(varPeriodClim2)
+ gc()
+
+ #if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+ #if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+ gc()
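+ # Minimal standalone illustration of the LOESS step above on synthetic data (a
+ # sketch, not part of the pipeline; 216 daily leadtimes as in the commented
+ # 's4.data' example):
+ # demo <- data.frame(ens.mean = sin(2*pi*(1:216)/365) + rnorm(216, sd=0.3), day = 1:216)
+ # demo.loess <- loess(ens.mean ~ day, demo, span=0.35)
+ # demo.clim <- predict(demo.loess) # smooth daily climatology; anomaly = data - demo.clim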
+ ###### impact maps:
+
+ load.month <- p + lead.month
+ if(load.month > 12) load.month <- load.month - 12
+
+ # select only the var data of the chosen lead time, removing leap-year days (29 February):
+ varPeriod <- array(NA,c(dim(vareuFull)[1:3],length(pos.period(2001,load.month)),dim(vareuFull)[5:6]))
+
+ i <- 1
+ for(y in years){
+ #y.mod <- y
+ #if((p + lead.month) > 12) y.mod <- y + 1
+ #pos.per <- pos.period(y.mod, load.month) - ifelse(p == 1, 0, length(c(pos.months.before(y, p-1), pos.period(y, p-1))))
+
+ # day positions of the target month inside the leadtime axis: the cumulated days of the
+ # lead months in between, plus the days of the target month itself
+ # (ndm(m,y) returns the number of days of month m in year y; see Rfunctions.R):
+ if(lead.month == 0) { pos.per <- 1:ndm(p,y) } else {
+ pos.per <- sum(sapply(p:(p + lead.month - 1), function(m) ndm(m,y))) + 1:ndm(p + lead.month,y) }
+
+ if(length(pos.per) == 29) {
+ #febmod = length(pos.period(y.mod,load.month))
+ varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per[-29],,] # drop the 29th of February
+ } else {
+ varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per,,]
+ }
+
+ i <- i+1
+ }
+
+ gc()
+
+ years.var <- years
+ load(file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # load the cluster sequence for the chosen startdate and leadtime
+ #cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) # no leap-year days are left, only the years can be different!
+ if(length(years.var) < length(years)) {
+ print("Warning: there are more psl years than var years. Removing the years not in common...")
+ ss <- which(is.na(match(years,years.var))) # remove the psl years not present in the var data
+ my.cluster.array2 <- my.cluster.array[[p]][,-ss,] # (no leap-year days are left in my.cluster.array)
+ } else {
+ my.cluster.array2 <- my.cluster.array[[p]] # (no leap-year days are left in my.cluster.array)
+ }
+
+ wr1 <- which(my.cluster.array2 == 1, arr.ind=T)
+ wr2 <- which(my.cluster.array2 == 2, arr.ind=T)
+ wr3 <- which(my.cluster.array2 == 3, arr.ind=T)
+ wr4 <- which(my.cluster.array2 == 4, arr.ind=T)
+
+ var1 <- var2 <- var3 <- var4 <- array(NA,c(dim(varPeriod)))
+
+ for (i in 1:dim(wr1)[1]) var1[1, wr1[i,3], wr1[i,2], wr1[i,1],,] <- varPeriod[1, wr1[i,3], wr1[i,2], wr1[i,1],,]
+ for (i in 1:dim(wr2)[1]) var2[1, wr2[i,3], wr2[i,2], wr2[i,1],,] <- varPeriod[1, wr2[i,3], wr2[i,2], wr2[i,1],,]
+ for (i in 1:dim(wr3)[1]) var3[1, wr3[i,3], wr3[i,2], wr3[i,1],,] <- varPeriod[1, wr3[i,3], wr3[i,2], wr3[i,1],,]
+ for (i in 1:dim(wr4)[1]) var4[1, wr4[i,3], wr4[i,2], wr4[i,1],,] <- varPeriod[1, wr4[i,3], wr4[i,2], wr4[i,1],,]
+
+ varwr1mean <- apply(var1, c(5,6), mean, na.rm=T)
+ varwr2mean <- apply(var2, c(5,6), mean, na.rm=T)
+ varwr3mean <- apply(var3, c(5,6), mean, na.rm=T)
+ varwr4mean <- apply(var4, c(5,6), mean, na.rm=T)
+ gc()
+
+ n.datos <- n.years.full * n.days.in.a.period(p,2001) # size of the full sample (years x days)
+
+ varwrBoth1 <- abind(varPeriod, var1, along = 1)
+ pvalue1 <- apply(varwrBoth1, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+ rm(varwrBoth1)
+ gc()
+
+ varwrBoth2 <- abind(varPeriod, var2, along = 1)
+ pvalue2 <- apply(varwrBoth2, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+ rm(varwrBoth2)
+ gc()
+
+ varwrBoth3 <- abind(varPeriod, var3, along = 1)
+ pvalue3 <- apply(varwrBoth3, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+ rm(varwrBoth3)
+ gc()
+
+ varwrBoth4 <- abind(varPeriod, var4, along = 1)
+ pvalue4 <- apply(varwrBoth4, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+ rm(varwrBoth4)
+
+ # save all the data necessary to redraw the graphs when we know the right regime:
+ save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))
+
+ rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+ gc()
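+ # The four significance blocks above repeat the same test and could be factored
+ # into one helper (a sketch under the same assumptions, where 'full' is the whole
+ # sample, 'sub' the regime-conditioned one, and dimensions 5 and 6 are lat and lon):
+ # regime.pvalue <- function(full, sub, n.datos) {
+ #   both <- abind(full, sub, along = 1)
+ #   apply(both, c(5,6), function(x) t.test(x[1:n.datos], x[(n.datos+1):length(x)])$p.value)
+ # }
+ # pvalue1 <- regime.pvalue(varPeriod, var1, n.datos)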
+ } # close p on period
+} # close if on data type
+
+
+
+if(fields.name == ECMWF_monthly.name){
+ # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates)
+
+ # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+ psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+
+ # average the four 6-hourly values of each day to get a daily mean:
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+ }
+
+
+
+cat("Finished!\n")
+
diff --git a/old/weather_regimes_maps.R b/old/weather_regimes_maps.R
new file mode 100644
index 0000000000000000000000000000000000000000..e13112778f6cd6c43f575b879d80d7ef83df8dc3
--- /dev/null
+++ b/old/weather_regimes_maps.R
@@ -0,0 +1,310 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Assumptions: this script must be run twice: the first time with the option 'ordering = F', to create a .pdf or .png file that the user must open to find visually the order
+# in which the four regimes are stored inside the four clusters. For example, he may find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. He then has to change 'ordering' from F to T and set the four variables 'clusterX.name' (X=1..4)
+# to follow the same order found inside that file; after that, he can run this script a second time to get the maps in the right order, which by default is,
+# from top to bottom: "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden').
+# During these first two runs, the variable 'save.names' must be set to TRUE, so an .RData file is written to store the order of the four regimes, in case
+# a user needs to visualize these maps again in the future. In that case, the user must set 'save.names = FALSE' and 'ordering = TRUE' to be able to visualize the maps
+# in the correct order.
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis or forecasts
+var.num <- 1 # Choose a variable for the impact maps 1: sfcWind 2: tas
+p <- 1 # Choose a period from 1 to 17. You must first have created the '_mapdata.RData' file with the output of weather_regimes.R
+ # for that period. 
(1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year) +lead.month <- 2 # in case you selected forecasts, specify a leadtime (0 = same month of the start month) + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- T # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file + +if(save.names){ + cluster4.name <- "NAO+" + cluster3.name <- "NAO-" + cluster2.name <- "Blocking" + cluster1.name <- "Atl.Ridge" +} + +var.name <- c("sfcWind","tas") +if(fields.name == rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) +if(fields.name == forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- 
colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +if(save.names){ + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + if(fields.name == rean.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name == forecast.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes 
for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", 
cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Legend 2:
+ if(fields.name == rean.name){
+ legend2.xpos <- 0.48
+ legend2.width <- 0.25
+ legend2.cex <- 1.8
+ my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ }
+
+ if(!as.pdf) dev.off() # close the .png file of this season
+
+if(as.pdf) dev.off() # close the single .pdf file with all seasons
+
+
diff --git a/old/weather_regimes_maps_v10.R b/old/weather_regimes_maps_v10.R
new file mode 100644
index 0000000000000000000000000000000000000000..ee6d789caa6b0716a1468212c0d6554ad619221c
--- /dev/null
+++ b/old/weather_regimes_maps_v10.R
@@ -0,0 +1,995 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: the first time with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+# file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, he may find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. He then has to change 'ordering'
+# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+# The second time, you only have to run this script to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden').
+# You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set
+# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+# If your regimes derive from a forecast system, you must first compute the regimes derived from a reanalysis. You can then use the reanalysis regimes
+# to automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" #/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with '_psl.RData' suffix:
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas
+
+period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+ # You must first have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period.
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot
+ # (and variable 'period' becomes the start month)
+
+# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the maps and find visually the right regime names of the four clusters; then,
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered maps.
+# After that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months
+
+# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+# Associate each regime with one of the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # loading the _psl files can change this value!
+fields.name.orig <- fields.name
+period.orig <- period; workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+for(lead.month in lead.months){
+#lead.month=0 # for the debug
+
+if(as.pdf && fields.name == rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60))
+if(as.pdf && fields.name == forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60))
+
+for(p in period){
+ #p <- 1 # for the debug
+ p.orig <- p
+
+ # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+ if(fields.name == rean.name) {
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData"))
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ }
+ if(fields.name == forecast.name){
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+ #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data
+ }
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ if(fields.name == forecast.name){
+
+ # compute cluster persistence:
+ my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector ordered as: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
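+ # Persistence below is the mean length of the runs of consecutive days spent in
+ # each cluster; e.g. rle(c(1,1,2,2,2,1)) has run lengths 2,3,1 for values 1,2,1,
+ # so the mean persistence of cluster 1 would be mean(c(2,1)) = 1.5 days.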
+ runs <- rle(my.cluster.vector) # lengths of the maximal runs of each cluster
+ pers1 <- mean(runs$lengths[runs$values == 1])
+ pers2 <- mean(runs$lengths[runs$values == 2])
+ pers3 <- mean(runs$lengths[runs$values == 3])
+ pers4 <- mean(runs$lengths[runs$values == 4])
+
+ # compute correlations between the simulated clusters and the observed regime anomalies:
+ cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+ lat.forecast <- lat; lon.forecast <- lon
+
+ if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis month to load
+ load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+ load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # also load the reanalysis regime names
+
+ wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+ if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis ones")}
+ if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+ if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+ # spatial correlation of each simulated cluster with each observed regime pattern:
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+ sim.patterns <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+ obs.patterns <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+ for(i in 1:4) for(j in 1:4) cluster.corr[i,j] <- cor(as.vector(sim.patterns[[i]]), as.vector(obs.patterns[[j]]))
+ # associate each simulated cluster to one observed regime, greedily picking the
+ # highest correlation left and excluding its row and column at each step:
+ max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+ cluster.corr2 <- cluster.corr
+ cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+ cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+ max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+ cluster.corr3 <- cluster.corr2
+ cluster.corr3[max2[1],] <- -999
+ cluster.corr3[,max2[2]] <- -999
+ max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+ cluster.corr4 <- cluster.corr3
+ cluster.corr4[max3[1],] <- -999
+ cluster.corr4[,max3[2]] <- -999
+ max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+ seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+ seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+ cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+ cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+ cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+ cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+ cluster1.name <- regimes.obs[cluster1.regime]
+ cluster2.name <- regimes.obs[cluster2.regime]
+ cluster3.name <- regimes.obs[cluster3.regime]
+ cluster4.name <- regimes.obs[cluster4.regime]
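+ # The greedy search above does not guarantee the globally optimal one-to-one
+ # matching; if the 'clue' package were available, the Hungarian algorithm would
+ # give it directly (a sketch; the shift keeps the matrix non-negative, as
+ # solve_LSAP requires, without changing the optimal assignment):
+ # matching <- clue::solve_LSAP(cluster.corr - min(cluster.corr), maximum = TRUE)
+ # cluster1.name <- regimes.obs[matching[1]] # and so on for clusters 2-4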
+ # measure cluster persistence for the reanalysis dataset in the same way:
+ my.cluster.vector <- my.cluster[[load.month]][[1]]
+ runs <- rle(my.cluster.vector)
+ persObs1 <- mean(runs$lengths[runs$values == 1])
+ persObs2 <- mean(runs$lengths[runs$values == 2])
+ persObs3 <- mean(runs$lengths[runs$values == 3])
+ persObs4 <- mean(runs$lengths[runs$values == 4])
+
+ # insert here any manual correction to the regime assignation, in case of misassignment in the automatic selection:
+ #if(p= && lead.month= ) clusterX.name=...
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig,period)) period <- period.orig
+ if(p.orig != p) p <- p.orig
+
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+ #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig, period)) period <- period.orig
+ if(p.orig != p) p <- p.orig
+ if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+
+ } # close if on 'forecast.name'
+
+ if(fields.name == rean.name){
+ if(save.names){
+ save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+ } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+ load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ }
+ }
+
+ # assign to each cluster its regime:
+ if(ordering == FALSE && fields.name == rean.name){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ if(p == 100){ # (p is never 100, so the impact maps are disabled in this version)
+ assign(paste0("imp",cluster1), varwr1mean)
+ assign(paste0("imp",cluster2), varwr2mean)
+ assign(paste0("imp",cluster3), varwr3mean)
+ assign(paste0("imp",cluster4), varwr4mean)
+
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+ }
+
+ if(fields.name == forecast.name){
+ assign(paste0("freMax",cluster1), wr1yMax)
+ assign(paste0("freMax",cluster2), wr2yMax)
+ assign(paste0("freMax",cluster3), wr3yMax)
+ assign(paste0("freMax",cluster4), wr4yMax)
+
+ assign(paste0("freMin",cluster1), wr1yMin)
+ assign(paste0("freMin",cluster2), wr2yMin)
+ assign(paste0("freMin",cluster3), wr3yMin)
+ assign(paste0("freMin",cluster4), wr4yMin)
+
+ cluster1.Obs <- which(orden == regimes.obs[1])
+ cluster2.Obs <- which(orden == regimes.obs[2])
+ cluster3.Obs <- which(orden == 
regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + assign(paste0("persistObs",cluster1), persObs1) + assign(paste0("persistObs",cluster2), persObs2) + assign(paste0("persistObs",cluster3), persObs3) + assign(paste0("persistObs",cluster4), persObs4) + + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" 
startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), 
lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name 
== forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= 
",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + #if(fields.name == forecast.name) save(orden, cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1,
type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -3, -2, -1, 0, 1, 2, 3, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off()
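+  # Geometry of the triangle plots used throughout this summary section: each
+  # (startdate, leadtime) cell is split into four triangles by the four polygon()
+  # calls above. A compact equivalent sketch (plot.cell() is only an illustrative
+  # helper and is not called anywhere in this script):
+  plot.cell <- function(p, l, cols){
+    polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols[1]) # left triangle (slice 1, NAO+)
+    polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols[2]) # bottom triangle (slice 2, Atl.Ridge)
+    polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols[3]) # right triangle (slice 3, NAO-)
+    polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols[4]) # top triangle (slice 4, Blocking)
+  }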
## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the strongest WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172),
new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v10.R~ b/old/weather_regimes_maps_v10.R~ new file mode 100644 index 0000000000000000000000000000000000000000..3cd136b7b56615bd43201c9e83c9fd571ab32efc --- /dev/null +++ b/old/weather_regimes_maps_v10.R~ @@ -0,0 +1,869 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case in future a user needs to visualize these maps again. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, which is employed to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
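+#
+# A minimal usage sketch of the two-pass workflow just described (the flag values are
+# illustrative; the flags themselves are defined further below):
+#
+#   ordering <- FALSE; save.names <- TRUE   # 1st run: inspect the maps and note the regime order of the clusters
+#   ordering <- TRUE;  save.names <- TRUE   # 2nd run: after setting clusterX.name, save the ordered cartography
+#   ordering <- TRUE;  save.names <- FALSE  # any later run: visualize only, without overwriting the stored order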
+# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" #/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name ='rean.name', Choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand. +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
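+# A hedged aside on the *.orig copies that follow: load() restores objects straight
+# into the calling environment, so a loaded .RData file can silently overwrite the
+# settings above; the copies are kept to restore them after each load(). An
+# alternative sketch (not used in this script; the file name is hypothetical) is to
+# load into a private environment and pick out only the objects needed:
+#   e <- new.env()
+#   load(paste0(workdir, "/some_regime_file.RData"), envir = e)
+#   e$pslwr1mean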
+var.num.orig <- var.num # when loading the _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period; workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +for(lead.month in lead.months){ + +if(as.pdf && fields.name == rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name == forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + + +for(p in period){ + #p <- 1 # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name){ + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
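+    # Hedged cross-check (the names runs.check / pers.check are illustrative additions,
+    # not part of the original algorithm): base R's rle() encodes the same consecutive
+    # runs that the loop below counts by hand:
+    runs.check <- rle(my.cluster.vector)                              # length and value of each run of equal cluster labels
+    pers.check <- tapply(runs.check$lengths, runs.check$values, mean) # mean run length (persistence) per cluster
+    # pers.check[["1"]] .. pers.check[["4"]] should equal pers1 .. pers4 computed below.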
sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between the simulated clusters and the observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order than the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim),
as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + #if(p= && lead.month= ) clusterX.name=...
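+    # The unrolled max1..max4 search above is a greedy maximum-correlation matching;
+    # an equivalent compact sketch, assuming no exact ties in the correlations
+    # (greedy.match() is an illustrative helper, not called anywhere else in this script):
+    greedy.match <- function(corr.mat){
+      assoc <- rep(NA_integer_, nrow(corr.mat)) # assoc[i] = observed regime matched to simulated cluster i
+      for(k in seq_len(nrow(corr.mat))){
+        m <- which(corr.mat == max(corr.mat), arr.ind=TRUE)[1,]
+        assoc[m[1]] <- m[2]
+        corr.mat[m[1],] <- -999 # exclude this cluster from the following maxima
+        corr.mat[,m[2]] <- -999 # exclude this regime from the following maxima
+      }
+      assoc
+    }
+    # e.g. greedy.match(cluster.corr) should reproduce c(cluster1.regime, cluster2.regime, cluster3.regime, cluster4.regime)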
+ + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(p.orig != p) p <- p.orig + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + + } # close for on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && fields.name == rean.name){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(p == 100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + # the observed frequencies belong to the reanalysis clusters, so they are mapped through clusterX.Obs (not clusterX): + assign(paste0("freObs",cluster1.Obs), wr1y.obs) + assign(paste0("freObs",cluster2.Obs), wr2y.obs) + assign(paste0("freObs",cluster3.Obs), wr3y.obs) + assign(paste0("freObs",cluster4.Obs), wr4y.obs) + +
assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + #assign(paste0("persistObs",cluster1.Obs), persObs1) + #assign(paste0("persistObs",cluster2.Obs), persObs2) + #assign(paste0("persistObs",cluster3.Obs), persObs3) + #assign(paste0("persistObs",cluster4.Obs), persObs4) + + } + + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: their pressure fields, their impact on var and their frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 +
map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + 
PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, 
cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + 
par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + #if(fields.name == forecast.name) save(orden, cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + array.cor <- array.diff.freq <- array.rpss <- array.pers <- array(NA,c(12,7,4)) # array storing correlations in the format: [ startdate, leadmonth, regime] + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + #cluster1 <- which(orden == cluster1.name) + #cluster2 <- which(orden == cluster2.name) + #cluster3 <- which(orden == cluster3.name) + #cluster4 <- which(orden == cluster4.name) + + array.cor[p,1+l, 1] <- sp.cor1 # NAO+ + array.cor[p,1+l, 3] <- sp.cor2 # NAO- + array.cor[p,1+l, 4] <- sp.cor3 # Blocking + array.cor[p,1+l, 2] <- sp.cor4 # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle +
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2])
+        polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3])
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4])
+     }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+  dev.off()
+
+
+
+  # Delta freq. summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+  # create an array similar to array.diff.freq but with colors instead of frequency differences:
+  freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+  array.freq.colors <- array(freq.cols[8],c(12,7,4))
+  array.freq.colors[array.diff.freq < 10] <- freq.cols[7]
+  array.freq.colors[array.diff.freq < 5] <- freq.cols[6]
+  array.freq.colors[array.diff.freq < 2] <- freq.cols[5]
+  array.freq.colors[array.diff.freq < 0] <- freq.cols[4]
+  array.freq.colors[array.diff.freq < -2] <- freq.cols[3]
+  array.freq.colors[array.diff.freq < -5] <- freq.cols[2]
+  array.freq.colors[array.diff.freq < -10] <- freq.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+  for(p in 1:12){
+     for(l in 0:6){
+        polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2])
+        polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3])
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4])
+     }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+  dev.off()
+
+
+
+  # Persistence summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+  # create an array similar to array.pers but with colors instead of persistence values
+  # (one bin per color, from my.seq[8] down to my.seq[2]):
+  pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  my.seq <- c(0, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, 10)
+
+  array.pers.colors <- array(pers.cols[8],c(12,7,4))
+  array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+  array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+  array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+  array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+  array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+  array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+  array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("S4 Regime persistence (in days/month)", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12),
las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
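+    # Sketch (not wired in): loading each .RData into a private environment would
+    # keep its objects from clobbering this script's variables, so no manual
+    # restores would be needed afterwards:
+    #   e <- new.env()
+    #   load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData"), envir=e)
+    #   cluster1.name.period <- e$cluster1.name.period # copy out only what is needed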
+
+    # position of longitude values of Europe only (without the Atlantic Ocean and America):
+    #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+    lon.max <- 45
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+
+    cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+    cluster2 <- which(orden == cluster2.name.period[p])
+    cluster3 <- which(orden == cluster3.name.period[p])
+    cluster4 <- which(orden == cluster4.name.period[p])
+
+    assign(paste0("imp",cluster1), varPeriodAnom1mean)
+    assign(paste0("imp",cluster2), varPeriodAnom2mean)
+    assign(paste0("imp",cluster3), varPeriodAnom3mean)
+    assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+    # imp.all: for each grid point, code (1.5, 2.5, 3.5 or 4.5) of the regime with the largest absolute impact:
+    imp.all <- imp1
+    for(i in 1:dim(imp1)[1]){
+      for(j in 1:dim(imp1)[2]){
+        if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+        if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+        if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+        if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+      }
+    }
+
+    if(p<=3) yMod <- 0.7
+    if(p>=4 && p<=6) yMod <- 0.5
+    if(p>=7 && p<=9) yMod <- 0.3
+    if(p>=10) yMod <- 0.1
+    if(p==13 || p==14) yMod <- 0.5
+    if(p==15 || p==16) yMod <- 0.1
+
+    if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+    if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+    if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+    if(p==13 || p==15) xMod <- 0.05
+    if(p==14 || p==16) xMod <- 0.50
+
+    # impact map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+    if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+    PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+    # title map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+    if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE)
+    mtext(my.period[p], font=2, cex=.9)
+
+  } # close 'p' on 'period'
+
+  par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+  ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+  par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+  mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+  dev.off()
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v11.R b/old/weather_regimes_maps_v11.R
new file mode 100644
index 0000000000000000000000000000000000000000..bb94729219125960e227cedf8b2dbe1d2e7276ec
--- /dev/null
+++ b/old/weather_regimes_maps_v11.R
@@ -0,0 +1,1128 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: first with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+#              file that the user must open to identify visually the order in which the four regimes are stored inside the four clusters. For example, the
+#              sequence of the images in the file (from top to bottom) might correspond to: Blocking / NAO- / Atl.Ridge / NAO+. The user then has to change 'ordering'
+#              from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+#              The second run only has to draw the maps in the right order, which by default is, from top to bottom:
+#              "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+#              is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set
+#              'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+#              If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. The reanalysis regimes can then be used
+#              to automatically associate to each simulated cluster one of the
+#              observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# working dir with the input files with '_psl.RData' suffix:
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas
+
+period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+               # You need to have created beforehand the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period.
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot
+                   # (and variable 'period' becomes the start month)
+
+# run it twice: once with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and visually find the right regime names of the four clusters; then,
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography.
+# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
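+# Summary of the three flag combinations described in the header above
+# (illustrative values only; the actual settings are the assignments around this note):
+#   1st run  (inspect the cluster order):        ordering <- FALSE; save.names <- TRUE
+#   2nd run  (save the ordered maps and names):  ordering <- TRUE;  save.names <- TRUE
+#   later runs (reuse the stored names):         ordering <- TRUE;  save.names <- FALSE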
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "psl" # choose which kind of composition you want to plot: 'simple' for monthly graphs, 'psl' for the regime anomalies for a fixed forecast month
+forecast.month <- 12 # in case composition = 'psl', choose a forecast month to plot its composition
+
+# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+# Associates the regimes to the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+if(composition == "psl") {
+  if(as.pdf && fields.name == forecast.name && composition == "psl") pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,".pdf"),width=40,height=60)
+  if(!as.pdf && fields.name == forecast.name && composition == "psl") png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_psl",".png"),width=6000,height=2300)
+
+  lead.months <- c(6:0,0)
+  n.map <- 0
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+  mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]," (",year.start,"-",year.end,")"), cex=5.5, font=2)
+}
+
+
+for(lead.month in lead.months){
+#lead.month=0 # for the debug
+
+if(as.pdf && fields.name == rean.name && composition == "simple") pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+if(as.pdf && fields.name == forecast.name && composition == "simple") pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+
+if(composition == 'psl') { period <- forecast.month - lead.month }
+
+for(p in period){
+  #p <- 1 # for the debug
+  p.orig <- p
+
+  # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+  if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) {
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData"))
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+  }
+
+  if(fields.name == forecast.name){
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+  }
+
+  if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+  if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+  if(p != p.orig) p <- p.orig
+
+  if(fields.name == forecast.name){
+    # compute cluster persistence:
+    my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector ordered as: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+
+    sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+    i1 <- i2 <- i3 <- i4 <- 1
+
+    if(my.cluster.vector[1] != 1) i1 <- 0
+    if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[1] != 2) i2 <- 0
+    if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[1] != 3) i3 <- 0
+    if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[1] != 4) i4 <- 0
+    if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+    n.dd <- length(my.cluster.vector)
+    for(d in 1:(n.dd-1)){
+      if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+      if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+      if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+      if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+      if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+      if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+      if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+      if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+    }
+
+    if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    pers1 <- mean(sequ1[which(sequ1 != 0)])
+    pers2 <- mean(sequ2[which(sequ2 != 0)])
+    pers3 <- mean(sequ3[which(sequ3 != 0)])
+    pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+    # compute correlations between simulated clusters and observed regime anomalies:
+    cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+    lat.forecast <- lat; lon.forecast <- lon
+
+    if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
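+    # The sequ1..sequ4 bookkeeping above is equivalent to a run-length encoding;
+    # as a cross-check (sketch only, 'runs' and 'pers.check' are not used below),
+    # base R's rle() gives the same mean run length per cluster:
+    runs <- rle(my.cluster.vector)                        # lengths and values of consecutive runs
+    pers.check <- tapply(runs$lengths, runs$values, mean) # pers.check[["1"]] should equal pers1, etc.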
+    load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+    load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+    wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+    regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+    if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+    if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+    if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+    if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+    cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+    cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+    cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+    cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+    cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+    cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+    cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+    cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+    cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+    cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+    cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+    cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+    cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+    cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+    cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+    cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+    cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+    # associate each simulated cluster to one observed regime:
+    max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+    cluster.corr2 <- cluster.corr
+    cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+    cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+    max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+    cluster.corr3 <- cluster.corr2
+    cluster.corr3[max2[1],] <- -999
+    cluster.corr3[,max2[2]] <- -999
+    max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+    cluster.corr4 <- cluster.corr3
+    cluster.corr4[max3[1],] <- -999
+    cluster.corr4[,max3[2]] <- -999
+    max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+    seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+    seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+    cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+    cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+    cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+    cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+    cluster1.name <- regimes.obs[cluster1.regime]
+    cluster2.name <- regimes.obs[cluster2.regime]
+    cluster3.name <- regimes.obs[cluster3.regime]
+    cluster4.name <- regimes.obs[cluster4.regime]
+
+    spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
+    spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+    spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+    spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+    # measure persistence for the reanalysis dataset:
+
+    # compute cluster persistence:
+    my.cluster.vector <- my.cluster[[load.month]][[1]]
+
+    sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+    i1 <- i2 <- i3 <- i4 <- 1
+
+    if(my.cluster.vector[1] != 1) i1 <- 0
+    if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[1] != 2) i2 <- 0
+    if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[1] != 3) i3 <- 0
+    if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[1] != 4) i4 <- 0
+    if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    n.dd <- length(my.cluster.vector)
+    for(d in 1:(n.dd-1)){
+      if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+      if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+      if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+      if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+      if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+      if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+      if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+      if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+    }
+
+    if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    persObs1 <- mean(sequ1[which(sequ1 != 0)])
+    persObs2 <- mean(sequ2[which(sequ2 != 0)])
+    persObs3 <- mean(sequ3[which(sequ3 != 0)])
+    persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+    # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+    #if(p= && lead.month= ) clusterX.name=...
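+    # The max1..max4 blocks above perform a greedy one-to-one matching: take the
+    # highest correlation, blank its row and column, and repeat. The same logic as
+    # a small reusable function (illustration only, not called by this script):
+    greedy.match <- function(corr.mat, blank = -999){
+      m <- corr.mat
+      assignment <- integer(nrow(m))
+      for(k in seq_len(nrow(m))){
+        best <- which(m == max(m), arr.ind = TRUE)[1,] # (cluster, regime) of the highest remaining correlation
+        assignment[best[1]] <- best[2]
+        m[best[1],] <- blank # blank the matched row ...
+        m[,best[2]] <- blank # ... and column, as done above
+      }
+      assignment # assignment[i]: observed regime index matched to simulated cluster i
+    }
+    # e.g. cluster1.regime could also be obtained as greedy.match(cluster.corr)[1]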
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig,period)) period <- period.orig
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+    if(p.orig != p) p <- p.orig
+
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig, period)) period <- period.orig
+    if(p.orig != p) p <- p.orig
+    if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+  } # close 'if' on forecast.name
+
+  if(fields.name == rean.name){
+    if(save.names){
+      save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+    } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+      load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    }
+  }
+
+  # assign to each cluster its regime:
+  if(ordering == FALSE && fields.name == rean.name){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  if(p == 100){
+    assign(paste0("imp",cluster1), varwr1mean)
+    assign(paste0("imp",cluster2), varwr2mean)
+    assign(paste0("imp",cluster3), varwr3mean)
+    assign(paste0("imp",cluster4), varwr4mean)
+
+    assign(paste0("sig",cluster1), pvalue1)
+    assign(paste0("sig",cluster2), pvalue2)
+    assign(paste0("sig",cluster3), pvalue3)
+    assign(paste0("sig",cluster4), pvalue4)
+  }
+
+  if(fields.name == forecast.name){
+    assign(paste0("freMax",cluster1), wr1yMax)
+    assign(paste0("freMax",cluster2), wr2yMax)
+    assign(paste0("freMax",cluster3), wr3yMax)
+    assign(paste0("freMax",cluster4), wr4yMax)
+
+    assign(paste0("freMin",cluster1), wr1yMin)
+    assign(paste0("freMin",cluster2), wr2yMin)
+    assign(paste0("freMin",cluster3), wr3yMin)
+    assign(paste0("freMin",cluster4), wr4yMin)
+
+    assign(paste0("spatial.cor",cluster1), spat.cor1)
+    assign(paste0("spatial.cor",cluster2), spat.cor2)
+    assign(paste0("spatial.cor",cluster3), spat.cor3)
assign(paste0("spatial.cor",cluster4), spat.cor3) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) 
png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, 
filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), 
col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 
0.908, 0.918), new=TRUE)
+      sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
+      mtext(paste0("r= ",sp.cor1), cex=4.5)
+      par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE)
+      sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2)
+      mtext(paste0("r= ",sp.cor2), cex=4.5)
+      par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE)
+      sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+      mtext(paste0("r= ",sp.cor3), cex=4.5)
+      par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE)
+      sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+      mtext(paste0("r= ",sp.cor4), cex=4.5)
+    }
+
+    # Legend 1:
+    legend1.xpos <- 0.10
+    legend1.width <- 0.80
+    legend1.cex <- 2
+    my.subset <- match(values.to.plot, my.brks)
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+    # Legend 2:
+    if(fields.name == rean.name){
+      legend2.xpos <- 0.48
+      legend2.width <- 0.25
+      legend2.cex <- 1.8
+      my.subset2 <- match(values.to.plot2, my.brks.var)
+      par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+      ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+      mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+      par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+      ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+      mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+      par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+      ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+      mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+      par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+      ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+      mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    }
+
+    if(!as.pdf) dev.off() # close the .png file of the current period
+
+  } # close if on: composition == 'simple'
+
+
+  # Visualize the composition with the regime anomalies for a selected forecast month:
+  if(composition == "psl"){
+
+    # Centroid maps:
+    n.map <- n.map + 1 # starts from 0
+    map.width <- 0.12 # must be set before computing map.xpos below
+    map.xpos <- 0.04 + map.width * (n.map - 1)
+    map.ypos <- 0.08
+    map.height <- 0.19
+    map.ysep <- 0.015
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
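+    # Each par(fig=c(xmin,xmax,ymin,ymax), new=TRUE) call selects the page sub-panel
+    # for the PlotEquiMap2() call that follows, stacking the four regime centroids in
+    # one column per lead month (map1 on top, map4 at the bottom):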
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(paste0(my.month.short[p], " -> ", my.month.short[forecast.month]), cex=5.5, font=2) + + # Regime's names: + title1.xpos <- 0 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.8305, 0.8355), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5955, 0.6005), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1655, 0.1705), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=4) + + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 3 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.06), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, my.ticks=-0.5+1:21, my.labels=my.brks) + + } # close if on composition == 'psl' + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name && composition == 'simple') save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spatial.cor1, spatial.cor2, spatial.cor3, spatial.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + +if(composition == "psl") dev.off() + + + + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spatial.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spatial.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spatial.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spatial.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+  array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+  array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7]
+  array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6]
+  array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5]
+  array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4]
+  array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3]
+  array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2]
+  array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+  for(p in 1:12){
+     for(l in 0:6){
+        polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.sp.cor.colors[p,1+l,2])
+        polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.sp.cor.colors[p,1+l,3])
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.sp.cor.colors[p,1+l,4])
+     }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+  dev.off()
+
+
+
+  # Corr summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+  # create an array similar to array.cor but with colors instead of corr.values:
+  corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+  array.cor.colors <- array(corr.cols[8],c(12,7,4))
+  array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+  array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+  array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+  array.cor.colors[array.cor < 0] <- corr.cols[4]
+  array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+  array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+  array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+  for(p in 1:12){
+     for(l in 0:6){
+        polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2])
+        polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3])
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4])
+     }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+  dev.off()
+
+
+
+  # Freq.
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) 
par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v11.R~ b/old/weather_regimes_maps_v11.R~ new file mode 100644 index 0000000000000000000000000000000000000000..68722741f307e54c60f004e80e073be68ba90a57 --- /dev/null +++ b/old/weather_regimes_maps_v11.R~ @@ -0,0 +1,998 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to visually find the order by which the four regimes are stored inside the four clusters. For example, the user can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, the user has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case in future a user needs to visualize these maps again. In that case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis can be associated +# with the forecast system, to automatically associate to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
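+# A minimal sketch of that association step ('cluster.corr' is the name used further below
+# in this script for the 4x4 matrix of spatial correlations between the simulated clusters
+# (rows) and the observed regimes (columns)): the highest remaining correlation is taken,
+# and its whole row and column are then excluded from the following searches, e.g.:
+#
+#   best <- which(cluster.corr == max(cluster.corr), arr.ind = TRUE)  # best (cluster, regime) pair
+#   cluster.corr[best[1], ] <- -999   # exclude this cluster from the next searches
+#   cluster.corr[, best[2]] <- -999   # exclude this regime from the next searches
+#
+# Repeated four times, this greedy criterion pairs each simulated cluster with exactly one
+# observed regime.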
+# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# working dir with the input files with '_psl.RData' suffix: + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand. +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and visually find the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes with the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change!
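+# Note: the '.orig' copies above and below exist because load() restores every object
+# stored in an .RData file into the current environment, silently overwriting any variable
+# that shares its name (e.g. 'var.num', 'fields.name' or 'period'). A minimal sketch of the
+# failure mode this guards against (file name purely illustrative):
+#
+#   var.num <- 1
+#   load("regimes.RData")  # if the file also stored 'var.num', the value above is lost
+#   if(var.num != var.num.orig) var.num <- var.num.orig  # so it is restored afterwards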
+fields.name.orig <- fields.name +period.orig <- period; workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +for(lead.month in lead.months){ +#lead.month=0 # for debugging + +if(as.pdf && fields.name == rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name == forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + #p <- 1 # for debugging + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name){ + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ...
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) +
cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + # measure persistence for the reanalysis dataset: + + # compute cluster persistence: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- 
mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignment due to misassignment in the automatic selection: + #if(p= && lead.month= ) clusterX.name=... + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(p.orig != p) p <- p.orig + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + + } # close for on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && fields.name == rean.name){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(p == 100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + +
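+ # Note on the assign(paste0(...)) idiom used above and below: assign() creates a variable
+ # whose name is built at run time, so e.g. assign(paste0("freObs",cluster1), wr1y.obs)
+ # stores wr1y.obs under the name 'freObs3' when cluster1 == 3; in this way the
+ # cluster-numbered series are re-indexed to follow the positions of the regimes in the
+ # 'orden' vector, and can be read back later simply as freObs1..freObs4.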
assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + assign(paste0("persistObs",cluster1), persObs1) + assign(paste0("persistObs",cluster2), persObs2) + assign(paste0("persistObs",cluster3), persObs3) + assign(paste0("persistObs",cluster4), persObs4) + + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title 
<- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, 
intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 
100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + 
symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + #if(fields.name == forecast.name) save(orden, cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1,
type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.diff.pers but with colors instead of persistence differences: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -3, -2, -1, 0, 1, 2, 3, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2,
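+# Note: the cascaded '<' assignments used to map values onto the 8-colour palettes above are a
+# manual binning; an equivalent one-liner with base R's findInterval() (sketch, shown for the
+# correlation palette; unlike the cascade, NA values stay NA instead of keeping cols[8]):
+# array.cor.colors <- array(corr.cols[1 + findInterval(array.cor, seq(-0.75, 0.75, 0.25))],
+#                           dim = dim(array.cor))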
labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) 
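+# Note: the nested i/j loop above, which picks at each grid point the regime with the largest
+# absolute impact, can be vectorised as below (sketch; ties are then broken towards the first
+# regime instead of the last one):
+# imp.stack <- array(abs(c(imp1, imp2, imp3, imp4)), dim = c(dim(imp1), 4))
+# imp.all[] <- apply(imp.stack, c(1,2), which.max) + 0.5   # gives 1.5, 2.5, 3.5 or 4.5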
par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v12.R b/old/weather_regimes_maps_v12.R new file mode 100644 index 0000000000000000000000000000000000000000..b3dcd6d52095211e36802f503a91a99fa82bba52 --- /dev/null +++ b/old/weather_regimes_maps_v12.R @@ -0,0 +1,1159 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you only have to run this script to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
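+#
+# Example of the two-pass procedure described above (illustrative settings only):
+#   pass 1:     ordering <- FALSE; save.names <- TRUE    # open the output file and note the regime order
+#   pass 2:     cluster1.name <- "Blocking"; ...         # copy the order found in pass 1, then:
+#               ordering <- TRUE;  save.names <- TRUE    # save the ordered maps and the ClusterNames .RData file
+#   later runs: ordering <- TRUE;  save.names <- FALSE   # redraw the maps without overwriting the stored order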
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# working dir with the input files with '_psl.RData' suffix: +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand. +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
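+# (What 'ordering' does downstream, as a sketch: each cluster is drawn at the row where its regime
+# sits in 'orden', via cluster1 <- which(orden == cluster1.name) and so on; with the default
+# 'orden' below, a cluster named "Blocking" is drawn in the third row from the top.)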
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "psl" # "psl" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs, + # - 'psl' for the regime anomalies for a fixed forecast month + # - 'fre' for the interannual frequencies for a fixed forecast month +forecast.month <- 1 # in case composition = 'psl', choose a forecast month to plot its composition + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes with the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change!
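+# Note: load() restores every object stored in an .RData file into the current environment,
+# silently overwriting same-named variables; this is why var.num, fields.name, period and
+# workdir are copied to *.orig variables here and restored after each load() call below.
+# Minimal sketch with a hypothetical file:
+#   x <- 1; save(x, file="tmp.RData"); x <- 2; load("tmp.RData"); x   # x is 1 again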
+fields.name.orig <- fields.name +period.orig <- period +workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(composition == "psl") { + if(as.pdf && fields.name == forecast.name && composition == "psl") pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name && composition == "psl") png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_psl",".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + n.map <- 0 + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) +} + + +for(lead.month in lead.months){ +#lead.month=0 # for the debug + +if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) +} + +if(composition == 'psl') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 +} + +for(p in period){ + #p <- 1 # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ...
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis'")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) +
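+# Sketch: the 16 cor() calls filling cluster.corr in this block can be collapsed into one
+# expression, keeping the dimnames defined above:
+#   sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+#   obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+#   cluster.corr[,] <- outer(1:4, 1:4, Vectorize(function(i, j) cor(as.vector(sim[[i]]), as.vector(obs[[j]]))))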
cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + + # compute cluster persistence: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + 
if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + if(p.orig != p) p <- p.orig + + # insert here all the manual corrections to the regime assignment due to misassignment in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore right lat order + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + + } # close for on 'forecast.name' + +
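+# Cross-check sketch: the run-length bookkeeping used twice above (for the forecast and for the
+# reanalysis cluster series) is what base R's rle() computes; the mean persistence of cluster k
+# could equivalently be obtained with:
+#   mean.persistence <- function(cl, k) { r <- rle(cl); mean(r$lengths[r$values == k]) }
+#   mean.persistence(my.cluster.vector, 1)   # matches persObs1 here (or pers1 for the forecast vector)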
if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(p == 100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + assign(paste0("spatial.cor",cluster1), spat.cor1) + assign(paste0("spatial.cor",cluster2), spat.cor2) + assign(paste0("spatial.cor",cluster3), spat.cor3) + assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe
only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + 
map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, 
": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + 
if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.12 + map.xpos <- 0.04 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, 
intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else { + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # Regime names: + title1.xpos <- 0 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=4) + + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + + } # close if on composition == 'psl' + + # fre1, fre2, etc. already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name && composition == 'simple') save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spatial.cor1, spatial.cor2, spatial.cor3, spatial.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + +if(composition == "psl") dev.off() + + + + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spatial.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spatial.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spatial.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spatial.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates
Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- 
array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the strongest WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) 
par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v12.R~ b/old/weather_regimes_maps_v12.R~ new file mode 100644 index 0000000000000000000000000000000000000000..9adb6ff7e7007a60a692cd99edf63d71580fb61d --- /dev/null +++ b/old/weather_regimes_maps_v12.R~ @@ -0,0 +1,1147 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png +# file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, the user might find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+. Subsequently, 'ordering' has to be changed +# from F to T and the four variables 'clusterX.name' (X=1..4) set to follow the same order found inside that file. +# The second time, you only have to run this script to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case in future a user needs to visualize these maps again. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, namely the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
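+# A minimal sketch of that association step (illustrative only: 'cc' and 'assoc' are stand-in names, not
+# variables of this script; the script's own implementation, based on 'cluster.corr', appears further below):
+#
+#   assoc <- rep(NA, 4)                              # assoc[i] = observed regime matched to simulated cluster i
+#   for(k in 1:4){
+#     m <- which(cc == max(cc), arr.ind=TRUE)[1,]    # strongest remaining (cluster, regime) pair
+#     assoc[m[1]] <- m[2]
+#     cc[m[1],] <- -999                              # mask this cluster's row...
+#     cc[,m[2]] <- -999                              # ...and this regime's column, so neither is matched twice
+#   }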
+# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# working dir with the input files with '_psl.RData' suffix: + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas) + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You must first have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
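+# quick reference for the three passes just described (a sketch; 'ordering' is set above, 'save.names' just below):
+#   ordering <- FALSE; save.names <- TRUE    # pass 1: plot the clusters in storage order and identify each regime visually
+#   ordering <- TRUE;  save.names <- TRUE    # pass 2: after filling the clusterX.name variables, save the ordered names
+#   ordering <- TRUE;  save.names <- FALSE   # later runs: reuse the saved names without overwriting them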
+save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "psl" # "psl" # choose which kind of composition you want to plot: 'simple' for monthly graphs, 'psl' for the regime anomalies for a fixed forecast month +forecast.month <- 10 # in case composition = 'psl', choose a forecast month to plot its composition + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(composition == "psl") { + if(as.pdf && fields.name == forecast.name && composition == "psl") pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name && composition == "psl") png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_psl",".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + n.map <- 0 + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]," (",year.start,"-",year.end,")"), cex=5.5, font=2) +} + + +for(lead.month in lead.months){ +#lead.month=0 # for the debug + +if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) +} + +if(composition == 'psl') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 +} + +for(p in period){ + #p <- 1 # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- 
var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + 
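+    # equivalent one-liner with modular arithmetic (a sketch, not used by the script):
+    #   load.month <- ((p + lead.month - 1) %% 12) + 1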
load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- 
regimes.obs[cluster4.regime] + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + + # compute cluster persistence: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # insert here all the manual corrections to the regime assignment due to misassignment in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + 
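+    # note: the two 'compute cluster persistence' blocks above do by hand what base R's rle() computes;
+    # a minimal equivalent sketch (illustrative only, not used by the script):
+    #   runs <- rle(my.cluster.vector)
+    #   pers1 <- mean(runs$lengths[runs$values == 1])   # mean run length (in days) of cluster 1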
#load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(p == 100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + assign(paste0("spatial.cor",cluster1), spat.cor1) + assign(paste0("spatial.cor",cluster2), spat.cor2) + assign(paste0("spatial.cor",cluster3), spat.cor3) + assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + 
assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + 
map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + 
PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, 
cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + 
par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png files + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecast month: + if(composition == "psl"){ + + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.12 + map.xpos <- 0.04 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, 
contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # Regime names: + title1.xpos <- 0 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=4) + + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + + } # close if on composition == 'psl' + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name && composition == 'simple') save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spatial.cor1, spatial.cor2, spatial.cor3, spatial.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + +if(composition == "psl") dev.off() + + + + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spatial.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spatial.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spatial.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spatial.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.sp.cor but with colors instead of corr.values: + sp.corr.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) 
par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v13.R b/old/weather_regimes_maps_v13.R new file mode 100644 index 0000000000000000000000000000000000000000..510165f38478702f4e8130fcd49ab84970bcc34d --- /dev/null +++ b/old/weather_regimes_maps_v13.R @@ -0,0 +1,1212 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, they can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, they have to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
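+#
+# NOTE: a minimal sketch of the greedy matching described above (illustration only, not
+# part of the original workflow; 'greedy.match' is a hypothetical helper): given a 4x4
+# matrix of spatial correlations between simulated clusters (rows) and observed regimes
+# (columns), repeatedly pick the global maximum and exclude its row and column, as done
+# below with the cluster.corr / cluster.corr2 / ... matrices. Kept commented out:
+#
+# greedy.match <- function(corr.mat){
+#   assignment <- rep(NA, nrow(corr.mat))   # assignment[i] = observed regime matched to cluster i
+#   for(k in 1:nrow(corr.mat)){
+#     m <- which(corr.mat == max(corr.mat), arr.ind = TRUE)[1,]
+#     assignment[m[1]] <- m[2]
+#     corr.mat[m[1],] <- -999               # exclude this simulated cluster
+#     corr.mat[,m[2]] <- -999               # exclude this observed regime
+#   }
+#   assignment
+# }
+#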
+ # + # Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# working dir with the input files with '_psl.RData' suffix: +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created beforehand the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "simple" # "psl" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs, + # - 'psl' for the regime anomalies for a fixed forecast month + # - 'fre' for the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, frequency bias, correlations, etc.)
+ # - 'impact' for the impact plot of the regime with the highest impact + +forecast.month <- 11 # in case composition = 'psl' or 'fre', choose a forecast month from 1 to 12 to plot its composition + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact"){ + +if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) +} + + +for(lead.month in lead.months){ +#lead.month=lead.months[1] # for the debug + +if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) +} + +if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 +} + +for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig)
fields.name <- fields.name.orig # restore original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + 
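+
+ # NOTE: a compact alternative to the spell-counting loops earlier in this block,
+ # kept inert inside if(FALSE) as an illustrative sketch only (it assumes that
+ # 'my.cluster.vector' holds the integer cluster labels 1..4); run-length encoding
+ # yields the lengths of the runs of consecutive days spent in each cluster:
+ if(FALSE){
+   spells <- rle(my.cluster.vector)                          # lengths of consecutive-day runs
+   mean.spell <- tapply(spells$lengths, spells$values, mean)
+   # mean.spell["1"] ... mean.spell["4"]: mean spell length of clusters 1 to 4
+ }
+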
load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + if(p.orig != p) p <- p.orig + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime
<- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast 
month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore right lat order + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + + } # close if on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(p == 100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + 
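+
+ # NOTE: the assign(paste0(...)) calls in this block reorder the per-cluster outputs
+ # into the regime order of the 'orden' vector. A list-based equivalent is kept inert
+ # inside if(FALSE) as an illustrative sketch only ('maps.ordered' is hypothetical):
+ if(FALSE){
+   maps <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+   names(maps) <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+   maps.ordered <- maps[orden]          # maps.ordered[[k]] corresponds to mapk above
+ }
+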
assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + assign(paste0("spatial.cor",cluster1), spat.cor1) + assign(paste0("spatial.cor",cluster2), spat.cor2) + assign(paste0("spatial.cor",cluster3), spat.cor3) + assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + } + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA
<- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + freq.max=100 + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, 
intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + 
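+
+ # NOTE: every panel of this composition is placed with the base-graphics idiom
+ # par(fig=c(xmin,xmax,ymin,ymax), new=TRUE), which draws the next plot inside the
+ # given normalized device region without erasing the current page. A minimal
+ # stand-alone sketch, kept inert inside if(FALSE) for reference only:
+ if(FALSE){
+   plot.new()
+   par(fig=c(0, 0.5, 0.5, 1), new=TRUE); plot(1:10, main="top-left panel")
+   par(fig=c(0.5, 1, 0, 0.5), new=TRUE); plot(10:1, main="bottom-right panel")
+ }
+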
if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
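+
+ # NOTE on the remark above: to splice a computed string into a plotmath expression,
+ # wrap it with bquote() and .(), as done for the mean-frequency labels below. A
+ # minimal inert example (the values in 'nu.lab' are arbitrary placeholders):
+ if(FALSE){
+   nu.lab <- paste0(round(100*mean(c(0.21, 0.29)), 1), "%")
+   plot.new(); mtext(bquote(bar(nu) == .(nu.lab)))
+ }
+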
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.12 + map.xpos <- 0.04 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # Regime's names: + title1.xpos <- 0 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + 
mtext(paste0(regime1.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=4) + + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + + } # close if on composition == 'psl' + + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector)
+ if(fields.name == forecast.name && composition == 'simple') save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spatial.cor1, spatial.cor2, spatial.cor3, spatial.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+
+} # close 'p' on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+} # close 'lead.month' on 'lead.months'
+
+if(composition == "psl" || composition == "fre"){
+  # Regime names:
+  title1.xpos <- 0.01
+  title1.width <- 0.04
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+  mtext(paste0(regime1.name), font=2, cex=5)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+  mtext(paste0(regime2.name), font=2, cex=5)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE)
+  mtext(paste0(regime3.name), font=2, cex=5)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE)
+  mtext(paste0(regime4.name), font=2, cex=5)
+
+  if(composition == "psl") {
+    # Color legend:
+    legend1.xpos <- 0.10
+    legend1.width <- 0.80
+    legend1.cex <- 4
+
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+    ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks)
+    par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+    mtext("hPa", cex=legend1.cex*1.3)
+  }
+
+  dev.off()
+} # close if on composition
+
+} # close if on composition != "summary"
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+  # array storing correlations in the format: [startdate, leadmonth, regime]
+  array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+
+  for(p in 1:12){
+    p.orig <- p
+
+    for(l in 0:6){
+
+      load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      array.sp.cor[p,1+l, 1] <- spatial.cor1 # associates NAO+ to the first triangle (at left)
+      array.sp.cor[p,1+l, 3] <- spatial.cor2 # associates NAO- to the third triangle (at right)
+      array.sp.cor[p,1+l, 4] <- spatial.cor3 # associates Blocking to the fourth triangle (top one)
+      array.sp.cor[p,1+l, 2] <- spatial.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+      array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+      array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+      array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+      array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+      array.freq[p,1+l, 4]
<- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- 
corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 
0.162, yMod + 0.172), new=TRUE)
+    if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE)
+    mtext(my.period[p], font=2, cex=.9)
+
+  } # close 'p' on 'period'
+
+  par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+  ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+  par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+  mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+  dev.off()
+} # close if on composition == "impact"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v13.R~ b/old/weather_regimes_maps_v13.R~
new file mode 100644
index 0000000000000000000000000000000000000000..f1329ec228270c054e3b51cb46516f65ad7c3dbc
--- /dev/null
+++ b/old/weather_regimes_maps_v13.R~
@@ -0,0 +1,1213 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+#      Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+#              file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, the user might find that the
+#              sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, change 'ordering'
+#              from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+#              The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+#              "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+#              is written to store the order of the four regimes, in case in the future a user needs to visualize these maps again. In this case, the user must set
+#              'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+#              If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. The reanalysis is then used to automatically
+#              associate each simulated cluster with one of the observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies
+#              must have the same spatial resolution!
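+#
+# Example run sequence (a sketch of the two-pass workflow above; the script file name used here is assumed):
+#
+#   Rscript weather_regimes_maps.R   # 1st pass: 'ordering = F', 'save.names = T'; open the output maps to identify each cluster's regime
+#   Rscript weather_regimes_maps.R   # 2nd pass: set the 'clusterX.name' variables below in the order found, with 'ordering = T' and 'save.names = T'
+#   Rscript weather_regimes_maps.R   # later runs: 'ordering = T' and 'save.names = F', so the stored cluster order is not overwritten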
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall)         # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# working dir with the input files with '_psl.RData' suffix:
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+rean.name <- "ERA-Interim"  # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps. 1: sfcWind 2: tas
+
+period <- 1:12 # Choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+               # You need to have created beforehand the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period.
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+                   # (and variable 'period' becomes the start month)
+
+# run it twice: once with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and find visually the right regime names of the four clusters; then,
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography.
+# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- F # FALSE: save results as one .png file for each season/month; TRUE: save only one .pdf file with all seasons/months
+
+composition <- "fre" # "psl" # choose which kind of composition you want to plot:
+                     # - 'simple' for monthly graphs,
+                     # - 'psl' for the regime anomalies for a fixed forecast month
+                     # - 'fre' for the interannual frequencies for a fixed forecast month
+forecast.month <- 1  # in case composition = 'psl' or 'fre', choose a forecast month to plot its composition
+
+# if fields.name = 'forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+# Associate the regimes with the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+if(composition == "psl" || composition == "fre") {
+  if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+  if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+  lead.months <- c(6:0,0)
+  n.map <- 0
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+  mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+}
+
+
+for(lead.month in lead.months){
+#lead.month=0 # for the debug
+
+if(composition == "simple"){
+  if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+  if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+}
+
+if(composition == 'psl') {
+  period <- forecast.month - lead.month
+  if(period < 1) period <- period + 12
+}
+
+for(p in period){
+  #p <- 1 # for the debug
+  p.orig <- p
+
+  # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+  if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) {
+    load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+    load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+  }
+
+  if(fields.name == forecast.name && n.map < 7){
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+  }
+
+  if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+  if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+  if(p != p.orig) p <- p.orig
+
+  if(fields.name == forecast.name && n.map < 7){
+    # compute cluster persistence:
+    my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector ordered as: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+
+    sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+    i1 <- i2 <- i3 <- i4 <- 1
+
+    if(my.cluster.vector[1] != 1) i1 <- 0
+    if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[1] != 2) i2 <- 0
+    if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[1] != 3) i3 <- 0
+    if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[1] != 4) i4 <- 0
+    if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+    n.dd <- length(my.cluster.vector)
+    for(d in 1:(n.dd-1)){
+      if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+      if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+      if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+      if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+      if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+      if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+      if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+      if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+    }
+
+    if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    pers1 <- mean(sequ1[which(sequ1 != 0)])
+    pers2 <- mean(sequ2[which(sequ2 != 0)])
+    pers3 <- mean(sequ3[which(sequ3 != 0)])
+    pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+    # compute correlations between simulated clusters and observed regime anomalies:
+    cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+    lat.forecast <- lat; lon.forecast <- lon
+
+    if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+    load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+    load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names
+
+    wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+    regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+    if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+    if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+    if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+    if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+    cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+    cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+    cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+    cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+    cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+    cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+    cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+    cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+
cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + + # compute cluster persistence: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + 
if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    persObs1 <- mean(sequ1[which(sequ1 != 0)])
+    persObs2 <- mean(sequ2[which(sequ2 != 0)])
+    persObs3 <- mean(sequ3[which(sequ3 != 0)])
+    persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig,period)) period <- period.orig
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+    if(p.orig != p) p <- p.orig
+
+    # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+    # forecast month: September
+    if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+    if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+    if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"}
+    if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+    # forecast month: October
+    if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+    if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+    if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+    if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+    if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+    # forecast month: November
+    if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+    if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+    if(p == 8 && lead.month == 3) { cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+    if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+    if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig, period)) period <- period.orig
+    if(p.orig != p) p <- p.orig
+    if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+  } # close if on 'forecast.name'
+
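+  # The run-length bookkeeping above (sequ1..sequ4, i1..i4) measures the mean length of the
+  # spells of consecutive days spent in each cluster. A compact alternative sketch with rle(),
+  # shown for reference only (this helper is not called by the script; mean() returns NaN for
+  # a cluster that never occurs):
+  mean.spell.length <- function(cluster.vector, n.clusters = 4) {
+    runs <- rle(cluster.vector) # lengths and values of runs of identical consecutive elements
+    sapply(1:n.clusters, function(k) mean(runs$lengths[runs$values == k]))
+  }
+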
+  if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+    if(save.names){
+      save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+    } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+      load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    }
+  }
+
+  # assign to each cluster its regime:
+  if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  if(p == 100){
+    assign(paste0("imp",cluster1), varwr1mean)
+    assign(paste0("imp",cluster2), varwr2mean)
+    assign(paste0("imp",cluster3), varwr3mean)
+    assign(paste0("imp",cluster4), varwr4mean)
+
+    assign(paste0("sig",cluster1), pvalue1)
+    assign(paste0("sig",cluster2), pvalue2)
+    assign(paste0("sig",cluster3), pvalue3)
+    assign(paste0("sig",cluster4), pvalue4)
+  }
+
+  if(fields.name == forecast.name && n.map < 7){
+    assign(paste0("freMax",cluster1), wr1yMax)
+    assign(paste0("freMax",cluster2), wr2yMax)
+    assign(paste0("freMax",cluster3), wr3yMax)
+    assign(paste0("freMax",cluster4), wr4yMax)
+
+    assign(paste0("freMin",cluster1), wr1yMin)
+    assign(paste0("freMin",cluster2), wr2yMin)
+    assign(paste0("freMin",cluster3), wr3yMin)
+    assign(paste0("freMin",cluster4), wr4yMin)
+
+    assign(paste0("spatial.cor",cluster1), spat.cor1)
+    assign(paste0("spatial.cor",cluster2), spat.cor2)
+    assign(paste0("spatial.cor",cluster3), spat.cor3)
+    assign(paste0("spatial.cor",cluster4), spat.cor4)
+
+    assign(paste0("persist",cluster1), pers1)
+    assign(paste0("persist",cluster2), pers2)
+    assign(paste0("persist",cluster3), pers3)
+    assign(paste0("persist",cluster4), pers4)
+
+    cluster1.obs <- which(orden == regimes.obs[1])
+    cluster2.obs <- which(orden == regimes.obs[2])
+    cluster3.obs <- which(orden == regimes.obs[3])
+    cluster4.obs <- which(orden == regimes.obs[4])
+
+    assign(paste0("freObs",cluster1.obs), wr1y.obs)
+    assign(paste0("freObs",cluster2.obs), wr2y.obs)
+    assign(paste0("freObs",cluster3.obs), wr3y.obs)
+    assign(paste0("freObs",cluster4.obs), wr4y.obs)
+
+    assign(paste0("persistObs",cluster1.obs), persObs1)
+    assign(paste0("persistObs",cluster2.obs), persObs2)
+    assign(paste0("persistObs",cluster3.obs), persObs3)
+    assign(paste0("persistObs",cluster4.obs), persObs4)
+
+  }
+
+  # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+  #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only
+  EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe
only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + 
map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, 
": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + 
if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.12 + map.xpos <- 0.04 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, 
intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # Regime's names: + title1.xpos <- 0 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=4) + + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + + } # close if on composition == 'psl' + + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.12 + map.xpos <- 0.04 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # Regime's names: + 
title1.xpos <- 0 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=4) + + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + + } # close if on composition == 'fre' + + + # fre1, fre2, etc. already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name && composition == 'simple') save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spatial.cor1, spatial.cor2, spatial.cor3, spatial.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + +if(composition == "psl") dev.off() + + + + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spatial.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spatial.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spatial.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spatial.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 
1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + 
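+
+# NOTE: the cascade of threshold assignments above is just a binning operation;
+# an equivalent, more compact sketch using base R's findInterval() (kept here
+# only as a commented-out reference, assuming no NA cells in 'array.cor'):
+#
+#   brks <- seq(-1, 1, 0.25)                                # same 8 bins as above
+#   idx  <- findInterval(array.cor, brks, all.inside=TRUE)  # bin index 1..8 per cell
+#   array.cor.colors <- array(corr.cols[idx], dim=dim(array.cor))
+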
par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) 
par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE)
+mtext(my.period[p], font=2, cex=.9)
+
+} # close 'p' on 'period'
+
+par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+dev.off()
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v14.R b/old/weather_regimes_maps_v14.R
new file mode 100644
index 0000000000000000000000000000000000000000..c8780bcc305cb7bead71dcf3d9b86ce309cae1b9
--- /dev/null
+++ b/old/weather_regimes_maps_v14.R
@@ -0,0 +1,1334 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+#      Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png
+#      file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, the user can find that the
+#      sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, change 'ordering'
+#      from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+#      The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+#      "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+#      is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set
+#      'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+#      If your regimes derive from a forecast system, you must first compute the regimes derived from a reanalysis. Then, you can associate the reanalysis
+#      with the forecast system, employing it to automatically associate each simulated cluster with one of the
+#      observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
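+#
+#      A minimal sketch of that automatic association (a hypothetical, standalone
+#      example, not the exact code used below): given a 4x4 matrix 'cc' of spatial
+#      correlations between simulated clusters (rows) and observed regimes (columns),
+#      greedily pick the highest remaining correlation and exclude its row and column
+#      (assuming the maximum is unique at each step):
+#
+#        assoc <- rep(NA, 4)                        # assoc[i] = observed regime matched to cluster i
+#        for(k in 1:4){
+#          m <- which(cc == max(cc), arr.ind=TRUE)  # position of the highest correlation left
+#          assoc[m[1]] <- m[2]
+#          cc[m[1],] <- -999; cc[,m[2]] <- -999     # exclude this cluster and this regime
+#        }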
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# working dir with the input files with '_psl.RData' suffix:
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+               # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand.
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+                   # (and variable 'period' becomes the start month)
+
+# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then,
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography.
+# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "none" # choose which kind of composition you want to plot:
+                      # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                      # - 'psl' for all the regime anomalies for a fixed forecast month
+                      # - 'fre' for all the interannual frequencies for a fixed forecast month
+                      # - 'summary' for the summary plots of all forecast months (persistence bias, frequency bias, correlations, etc.)
[You need all ClusterNames files]
+                      # - 'impact' for all the impact plots of the regime with the highest impact
+                      # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+
+forecast.month <- 11 # in case composition = 'psl' or 'fre' or 'impact', choose a forecast month from 1 to 12 to plot its composition
+
+# if fields.name = 'forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+# Associate each regime with one of the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose the order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+n.map <- 0
+
+if(composition != "summary" && composition != "impact"){
+
+  if(composition == "psl" || composition == "fre") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0)
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+  }
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+      if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <-
var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+        #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data
+
+        #if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        #if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+        # compute the simulated interannual monthly variability:
+        np <- n.days.in.a.period(p,2001)
+
+        freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+        freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+        freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+        freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+        sd.freq.sim1 <- sd(freq.sim1) / np
+        sd.freq.sim2 <- sd(freq.sim2) / np
+        sd.freq.sim3 <- sd(freq.sim3) / np
+        sd.freq.sim4 <- sd(freq.sim4) / np
+
+        # compute cluster persistence:
+        my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ...
, day1month2member1, day2month2member1, ...
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+        n.dd <- length(my.cluster.vector)
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        pers1 <- mean(sequ1[which(sequ1 != 0)])
+        pers2 <- mean(sequ2[which(sequ2 != 0)])
+        pers3 <- mean(sequ3[which(sequ3 != 0)])
+        pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+        # compute correlations between simulated clusters and observed regime anomalies:
+        cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+        lat.forecast <- lat; lon.forecast <- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+        if(p.orig != p) p <- p.orig
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        np <- n.days.in.a.period(p,2001)
+        for(y in year.start:year.end){
+          # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+          freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*np + (1:np)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*np + (1:np)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*np + (1:np)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*np + (1:np)] == 4))
+        }
+
+        np <- n.days.in.a.period(p,2001)
+
+        sd.freq.obs1 <- sd(freq.obs1) / np
+        sd.freq.obs2 <- sd(freq.obs2) / np
+        sd.freq.obs3 <- sd(freq.obs3) / np
+        sd.freq.obs4 <-
sd(freq.obs4) / np
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+        cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+        cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+        cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+        cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+        cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+        cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+        cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+        cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+        cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+        cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+        cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+        cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+        cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+        cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+        cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+        cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+        # associate each simulated cluster to one observed regime:
+        max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+        cluster.corr2 <- cluster.corr
+        cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+        cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+        max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+        cluster.corr3 <- cluster.corr2
+        cluster.corr3[max2[1],] <- -999
+        cluster.corr3[,max2[2]] <- -999
+        max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+        cluster.corr4 <- cluster.corr3
+        cluster.corr4[max3[1],] <- -999
+        cluster.corr4[,max3[2]] <- -999
+        max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+        seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+        seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+        cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+        cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+        cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+        cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+        cluster1.name <- regimes.obs[cluster1.regime]
+        cluster2.name <- regimes.obs[cluster2.regime]
+        cluster3.name <- regimes.obs[cluster3.regime]
+        cluster4.name <- regimes.obs[cluster4.regime]
+
+        regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+        cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs))
+
+        spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+        spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+        spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+        # measure persistence for the reanalysis dataset:
+        my.cluster.vector <- my.cluster[[load.month]][[1]]
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        n.dd <- length(my.cluster.vector)
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        persObs1 <- mean(sequ1[which(sequ1 != 0)])
+        persObs2 <- mean(sequ2[which(sequ2 != 0)])
+        persObs3 <- mean(sequ3[which(sequ3 != 0)])
+        persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+        # insert here all the manual corrections to the regime assignment due to misassignment in the automatic selection:
+        # forecast month: September
+        if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"}
+        if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+        # forecast month: October
+        if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+        if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+        if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+        # forecast month: November
+        if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+        regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+        #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as a reference
+
+        order.sim <- match(regimes.sim2, orden)
+        order.obs <- match(regimes.obs, orden)
+
+        clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+        clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+        # recompute the spatial correlations, because they might have changed because of the manual corrections above:
+        spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+        spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+        spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+        spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+        load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+        #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig, period)) period <- period.orig
+        if(p.orig != p) p <- p.orig
+        if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order
+        if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+      } # close if on 'forecast.name'
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+        if(save.names){
+          save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+        } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+          load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+          if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+          if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        }
+      }
+
+      # assign to each cluster its regime:
+      if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+        cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+      } else {
+        cluster1 <- which(orden == cluster1.name)
+        cluster2 <- which(orden == cluster2.name)
+        cluster3 <- which(orden == cluster3.name)
+        cluster4 <- which(orden == cluster4.name)
+      }
+
+      assign(paste0("map",cluster1), pslwr1mean)
+      assign(paste0("map",cluster2), pslwr2mean)
+      assign(paste0("map",cluster3), pslwr3mean)
+      assign(paste0("map",cluster4), pslwr4mean)
+
+      assign(paste0("fre",cluster1), wr1y)
+      assign(paste0("fre",cluster2), wr2y)
+      assign(paste0("fre",cluster3), wr3y)
+      assign(paste0("fre",cluster4), wr4y)
+
+      if(composition ==
"impact"){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- 
seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + freq.max=100 + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + +# Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, 
+    par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+    # 'year' x-axis labels under the frequency plots:
+    map.title.xpos <- 0.76
+    map.title.width <- 0.2
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+    mtext("year", cex=3)
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+    mtext("year", cex=3)
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+    mtext("year", cex=3)
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+    mtext("year", cex=3)
+
+    # Titles of the centroid maps:
+    title1.xpos <- 0.02
+    title1.width <- 0.4
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+    mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+    mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+    mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+    mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+    # Impact maps (note: p == 100 is never true, so this block is currently disabled):
+    if(p == 100){
+      impact.xpos <- 0.47
+      impact.width <- 0.26
+      par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+      PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+      par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+      PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+      par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+      PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+      par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+      PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+      # Titles of the impact maps:
+      title2.xpos <- 0.42
+      title2.width <- 0.35
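+# The whole composition is laid out with the technique used above: par(fig =
+# c(xmin, xmax, ymin, ymax), new = TRUE) places each panel at normalised
+# device coordinates in [0,1] and draws it on top of the current page.
+# A minimal, self-contained sketch of this mechanism (kept inside if(FALSE)
+# so it is never executed; the panel positions are arbitrary examples):
+if(FALSE){
+  plot.new()
+  par(fig = c(0.0, 0.5, 0.5, 1.0), new = TRUE)   # top-left quadrant
+  plot(rnorm(10), main = "panel 1")
+  par(fig = c(0.5, 1.0, 0.5, 1.0), new = TRUE)   # top-right quadrant
+  plot(rnorm(10), main = "panel 2")
+  par(fig = c(0.1, 0.9, 0.0, 0.5), new = TRUE)   # bottom strip
+  plot(rnorm(10), main = "panel 3")
+} + 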
par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
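+# As the comment above notes, paste0() cannot be used inside expression(),
+# because it is not part of the plotmath grammar. The mean-frequency labels
+# further below therefore use bquote(): the .() operator evaluates the
+# paste0() call first and splices the resulting string into the expression.
+# Minimal sketch (not executed; 'fre.example' is a made-up value):
+if(FALSE){
+  fre.example <- 0.237
+  lbl <- bquote(bar(nu) == .(paste0(round(100 * fre.example, 1), "%")))
+  plot.new(); mtext(lbl)   # draws a nu with an overbar, followed by "= 23.7%"
+}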
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == 
"fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), 
new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } 
# close if on composition
+
+} # close if on composition != "summary"
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+  # arrays storing the summary indicators, in the format: [startdate, leadmonth, regime]
+  array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+
+  for(p in 1:12){
+    p.orig <- p
+
+    for(l in 0:6){
+
+      load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData"))  # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig                    # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig   # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      array.sp.cor[p,1+l, 1] <- spat.cor1  # associates NAO+ to the first triangle (at left)
+      array.sp.cor[p,1+l, 3] <- spat.cor2  # associates NAO- to the third triangle (at right)
+      array.sp.cor[p,1+l, 4] <- spat.cor3  # associates Blocking to the fourth triangle (top one)
+      array.sp.cor[p,1+l, 2] <- spat.cor4  # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.cor[p,1+l, 1] <- sp.cor1  # associates NAO+ to the first triangle (at left)
+      array.cor[p,1+l, 3] <- sp.cor2  # associates NAO- to the third triangle (at right)
+      array.cor[p,1+l, 4] <- sp.cor3  # associates Blocking to the fourth triangle (top one)
+      array.cor[p,1+l, 2] <- sp.cor4  # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.freq[p,1+l, 1] <- 100*(mean(fre1.NA))  # NAO+
+      array.freq[p,1+l, 3] <- 100*(mean(fre2.NA))  # NAO-
+      array.freq[p,1+l, 4] <- 100*(mean(fre3.NA))  # Blocking
+      array.freq[p,1+l, 2] <- 100*(mean(fre4.NA))  # Atl.Ridge
+
+      array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1))  # NAO+
+      array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2))  # NAO-
+      array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3))  # Blocking
+      array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4))  # Atl.Ridge
+
+      array.pers[p,1+l, 1] <- persist1  # NAO+
+      array.pers[p,1+l, 3] <- persist2  # NAO-
+      array.pers[p,1+l, 4] <- persist3  # Blocking
+      array.pers[p,1+l, 2] <- persist4  # Atl.Ridge
+
+      array.diff.pers[p,1+l, 1] <- persist1 - persistObs1  # NAO+
+      array.diff.pers[p,1+l, 3] <- persist2 - persistObs2  # NAO-
+      array.diff.pers[p,1+l, 4] <- persist3 - persistObs3  # Blocking
+      array.diff.pers[p,1+l, 2] <- persist4 - persistObs4  # Atl.Ridge
+
+      array.sd.freq[p,1+l, 1] <- sd.freq.sim1  # NAO+
+      array.sd.freq[p,1+l, 3] <- sd.freq.sim2  # NAO-
+      array.sd.freq[p,1+l, 4] <- sd.freq.sim3  # Blocking
+      array.sd.freq[p,1+l, 2] <- sd.freq.sim4  # Atl.Ridge
+
+      array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1  # NAO+
+      array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2  # NAO-
+      array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3  # Blocking
+      array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4  # Atl.Ridge
+
+      #array.rpss[p,1+l, 1] <-  # NAO+
+      #array.rpss[p,1+l, 3] <-  # NAO-
+      #array.rpss[p,1+l, 4] <-  # Blocking
+      #array.rpss[p,1+l, 2] <-  # Atl.Ridge
+    }
+  }
+
+
+  # Spatial Corr summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400)
+
+  # create an array similar 
to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + # St.Dev.freq ratio summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.cor.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.cor.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.cor.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.cor.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.cor.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.cor.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.cor.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.cor.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + 
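+# Each cell of these summary figures is a glyph of four polygons sharing the
+# cell centre: the left, bottom, right and top triangles encode NAO+,
+# Atl.Ridge, NAO- and Blocking respectively (see the index comments where the
+# arrays are filled). Minimal sketch of one glyph (not executed; colours are
+# arbitrary):
+if(FALSE){
+  plot(1, 1, type = "n", xlim = c(0, 2), ylim = c(-1, 1))
+  p0 <- 1; l0 <- 0; cols4 <- c("red", "orange", "blue", "purple")
+  polygon(c(p0-0.5, p0-0.5, p0), c(l0-0.5, l0+0.5, l0), col = cols4[1])  # left
+  polygon(c(p0-0.5, p0, p0+0.5), c(l0-0.5, l0, l0-0.5), col = cols4[2])  # bottom
+  polygon(c(p0, p0+0.5, p0+0.5), c(l0, l0+0.5, l0-0.5), col = cols4[3])  # right
+  polygon(c(p0-0.5, p0, p0+0.5), c(l0+0.5, l0, l0+0.5), col = cols4[4])  # top
+} + 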
title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + 
+load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData"))  # they should not depend on var.name!
+
+
+    # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+    #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon))  # restrict the area to continental Europe only
+    lon.max = 45
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon)))  # restrict the area to continental Europe only
+
+
+    cluster1 <- which(orden == cluster1.name.period[p])  # in future it will be == cluster1.name
+    cluster2 <- which(orden == cluster2.name.period[p])
+    cluster3 <- which(orden == cluster3.name.period[p])
+    cluster4 <- which(orden == cluster4.name.period[p])
+
+    assign(paste0("imp",cluster1), varPeriodAnom1mean)
+    assign(paste0("imp",cluster2), varPeriodAnom2mean)
+    assign(paste0("imp",cluster3), varPeriodAnom3mean)
+    assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+    # at each grid point, flag the regime with the largest absolute impact (coded as 1.5/2.5/3.5/4.5):
+    imp.all <- imp1
+    for(i in 1:dim(imp1)[1]){
+      for(j in 1:dim(imp1)[2]){
+        if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+        if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+        if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+        if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+      }
+    }
+
+    if(p<=3) yMod <- 0.7
+    if(p>=4 && p<=6) yMod <- 0.5
+    if(p>=7 && p<=9) yMod <- 0.3
+    if(p>=10) yMod <- 0.1
+    if(p==13 || p==14) yMod <- 0.5
+    if(p==15 || p==16) yMod <- 0.1
+
+    if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+    if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+    if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+    if(p==13 || p==15) xMod <- 0.05
+    if(p==14 || p==16) xMod <- 0.50
+
+    # impact map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+    if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+    PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+    # map title:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+    if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE)
+    mtext(my.period[p], font=2, cex=.9)
+
+  } # close 'p' on 'period'
+
+  par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+  ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+  par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+  mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+  dev.off()
+} # close if on composition == "impact"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v14.R~ b/old/weather_regimes_maps_v14.R~
new file mode 100644
index 0000000000000000000000000000000000000000..2d1d22060fe7cc981e110587d4cc88e1350cddfb
--- /dev/null
+++ b/old/weather_regimes_maps_v14.R~
@@ -0,0 +1,1245 @@
+g
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf files.
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+#              file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, one can find that the
+#              sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+. Then, change 'ordering'
+#              from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+#              The second time, you only have to run this script to get the maps in the right order, which by default is, from top to bottom:
+#              "NAO+", "NAO-", "Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so that an .RData file
+#              is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set
+#              'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+#              If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. The reanalysis can then be used to
+#              automatically associate each simulated cluster with one of the observed regimes: the one with the highest spatial correlation.
+#              Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification)  # for the function Load()
+library(Kendall)          # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')  # for the calendar functions
+
+# working dir with the input files with the '_psl.RData' suffix:
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+rean.name <- "ERA-Interim"   # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4"  # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name  # choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1  # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+period <- 1:12     # choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+                   # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand.
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+                   # (and the variable 'period' becomes the start month)
+
+# Run it twice: once with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and find visually the right regime names of the four clusters; then
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography.
+# After that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order:
+ordering <- T  # if TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
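+# Sketch of the two-pass naming workflow described above (not executed; the
+# file name is illustrative): the first pass saves the manually identified
+# cluster names, and later passes reload them instead of relying on the
+# hard-coded values further below.
+if(FALSE){
+  if(save.names){
+    save(cluster1.name, cluster2.name, cluster3.name, cluster4.name,
+         file = "example_ClusterNames.RData")
+  } else {
+    load("example_ClusterNames.RData")  # restores the four clusterX.name objects
+  }
+}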
+save.names <- F  # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- F  # FALSE: save results as one .png file for each season/month; TRUE: save only one .pdf file with all seasons/months
+
+composition <- "fre"  # choose which kind of composition you want to plot:
+                      # - 'simple'  for monthly graphs,
+                      # - 'psl'     for the regime anomalies for a fixed forecast month
+                      # - 'fre'     for the interannual frequencies for a fixed forecast month
+                      # - 'summary' for the summary plots of all forecast months (persistence bias, frequency bias, correlations, etc.)
+                      # - 'impact'  for the impact plot of the regime with the highest impact
+
+forecast.month <- 11  # in case composition = 'psl' or 'fre', choose a forecast month from 1 to 12 to plot its composition
+
+# if fields.name = 'forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+# associate each regime with one of the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature")  # full name of the 'predictand' variable, to put in the title of the graphs
+var.unit <- c("m/s", "ºC")                      # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num  # when loading _psl files, this value can change!
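+# Sketch of the reordering mechanism built on 'orden' (not executed; values
+# are made-up examples): which(orden == cluster.name) gives the display slot
+# of a cluster, and assign(paste0("map", slot), ...) stores its field under
+# that slot, so the plotting code can always draw map1..map4 top to bottom.
+if(FALSE){
+  cluster1.name.example <- "Blocking"             # hypothetical label of cluster 1
+  slot <- which(orden == cluster1.name.example)   # -> 3 with the 'orden' above
+  assign(paste0("map", slot), matrix(0, 2, 2))    # i.e. map3 <- field of cluster 1
+}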
+fields.name.orig <- fields.name
+period.orig <- period
+workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+n.map <- 0
+
+if(composition != "summary" && composition != "impact"){
+
+  if(composition == "psl" || composition == "fre") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0)  # the seven lead months, plus one extra pass used to draw the reanalysis column (n.map == 8)
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+  }
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1]  # for debugging
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    for(p in period){
+      #p <- period[1]  # for debugging
+      p.orig <- p
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month 'p' and lead month 'lead.month'):
+      if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0) || n.map == 7) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig                   # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(workdir != workdir.orig) workdir <- workdir.orig                  # restore the original values of this script
+
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData"))  # load cluster time series and mean psl data
+        #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))  # load mean var data
+
+        if(var.num != var.num.orig) var.num <- var.num.orig                   # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(workdir != workdir.orig) workdir <- workdir.orig                  # restore the original values of this script
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig                    # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig   # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+        # compute cluster persistence:
+        my.cluster.vector <- as.vector(my.cluster.array[[p]])  # reduce it to a vector ordered as: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+        # sequX[k] accumulates the length of the k-th spell of cluster X:
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+        n.dd <- length(my.cluster.vector)
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        pers1 <- mean(sequ1[which(sequ1 != 0)])
+        pers2 <- mean(sequ2[which(sequ2 != 0)])
+        pers3 <- mean(sequ3[which(sequ3 != 0)])
+        pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+        # compute correlations between the simulated clusters and the observed regimes' anomalies:
+        cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+        lat.forecast <- lat; lon.forecast <- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month }  # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData"))       # load reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData"))  # load also the reanalysis regime names
+
+        if(var.num != var.num.orig) var.num <- var.num.orig                   # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(workdir != workdir.orig) workdir <- workdir.orig                  # restore the original values of this script
+        if(p.orig != p) p <- p.orig
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y  # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
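+# Compact sketch of the greedy matching performed below (not executed):
+# repeatedly take the (simulated cluster, observed regime) pair with the
+# highest spatial correlation, then mask that row and column with -999 so
+# neither can be matched twice. Equivalent to the max1..max4 steps below.
+if(FALSE){
+  remaining <- cluster.corr
+  regime.of.cluster <- rep(NA_integer_, 4)
+  for(k in 1:4){
+    best <- which(remaining == max(remaining), arr.ind = TRUE)[1, ]
+    regime.of.cluster[best[1]] <- best[2]
+    remaining[best[1], ] <- -999
+    remaining[, best[2]] <- -999
+  }
+} + 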
cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + #spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + #spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + #spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + #spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && 
my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name) + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as a reference + + order.sim <- match(regimes.sim, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, since they might have changed after the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), 
as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + if(workdir != workdir.orig) workdir <- workdir.orig # restore original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(p == 100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), 
spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + } + + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + freq.max <- 100 + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); 
fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k <- k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + + # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + 
map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, 
barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, 
use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, 
brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name 
== forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name && composition == 'simple') save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + +} # close if on composition != "summary" + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # arrays storing the indicators in the format: [startdate, leadmonth, regime] + array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 
100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- 
corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < 10] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < 5] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < 2] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < 0] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < -2] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < -5] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < -10] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), 
col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max <- 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 
0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} # close if on composition == "impact" + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v15.R b/old/weather_regimes_maps_v15.R new file mode 100644 index 0000000000000000000000000000000000000000..e61f83e8da14c25ed502fcc7e6eef9cc66a03ea8 --- /dev/null +++ b/old/weather_regimes_maps_v15.R @@ -0,0 +1,3573 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, to employ it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
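+ # For reference only (illustration, not executed as part of this script): given a hypothetical 4x4 matrix
+ # 'cluster.corr' of spatial correlations between simulated clusters (rows) and observed regimes (columns),
+ # the association criterion described above can be sketched as a greedy loop that repeatedly picks the
+ # largest remaining correlation and removes its row and column ('assignment' is a hypothetical name):
+ #   assignment <- integer(4)
+ #   while(any(is.finite(cluster.corr))) {
+ #     m <- which(cluster.corr == max(cluster.corr, na.rm=TRUE), arr.ind=TRUE)[1,]
+ #     assignment[m[1]] <- m[2]   # simulated cluster m[1] gets observed regime m[2]
+ #     cluster.corr[m[1],] <- NA; cluster.corr[,m[2]] <- NA
+ #   }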
+ # + # Branch: weather_regimes + + rm(list=ls()) + library(s2dverification) # for the function Load() + library(Kendall) # for the MannKendall test + + source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + + # working dir with the input files with the '_psl.RData' suffix: + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + + rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) + forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + + fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + + var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas) + + period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created beforehand the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. + lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + + # run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, + # insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. + # after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: + ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). + save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + + as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + + composition <- "summary" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.)
+                         #   [You need all ClusterNames files]
+                         # - 'impact' for all the impact plots of the regime with the highest impact
+                         # - 'none' doesn't plot anything, it only saves the ClusterNames.RData files
+
+forecast.month <- 11 # in case composition = 'psl' or 'fre' or 'impact', choose a forecast month from 1 to 12 to plot its composition
+
+# if fields.name = forecast.name, set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+
+# Associate the regimes to the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+n.map <- 0
+
+if(composition != "summary" && composition != "impact"){
+
+  if(composition == "psl" || composition == "fre") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0) # lead months 6 down to 0; the extra final 0 adds a last column showing the reanalysis
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+  }
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+      if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) {
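+        # (The content of these .RData files is not documented in this script; judging from their use
+        #  below, they are expected to provide at least pslwr1mean..pslwr4mean (regime anomaly maps),
+        #  wr1y..wr4y (interannual regime frequencies), my.cluster / my.cluster.array (daily cluster
+        #  series), psl, psl.name, lat and lon.)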
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+        if(composition == "impact") load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data (for impact maps)
+
+        #if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        #if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+        # compute the simulated interannual monthly variability:
+        n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
+        freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+        freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+        freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+        freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+        sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+        sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+        sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+        sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
+        # compute the transition probabilities:
+        j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+        n.members <- dim(my.cluster.array[[p]])[3]
+
+        for(m in 1:n.members){
+          for(y in 1:n.years){
+            for (d in 1:(n.days.month-1)){
+
+              if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+                transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+                j1 <- j1 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+                transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+                j2 <- j2 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+                transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+                j3 <- j3 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+                transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+                j4 <- j4 + 1
+              }
+
+            }
+          }
+        }
+
+        trans.sim <- matrix(NA, 4, 4, dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+        trans.sim[1,2] <- length(which(transition1 == 2))
+        trans.sim[1,3] <- length(which(transition1 == 3))
+        trans.sim[1,4] <- length(which(transition1 == 4))
+
+        trans.sim[2,1] <- length(which(transition2 == 1))
+        trans.sim[2,3] <- length(which(transition2 == 3))
+        trans.sim[2,4] <- length(which(transition2 == 4))
+
+        trans.sim[3,1] <- length(which(transition3 == 1))
+        trans.sim[3,2] <- length(which(transition3 == 2))
+        trans.sim[3,4] <- length(which(transition3 == 4))
+
+        trans.sim[4,1] <- length(which(transition4 == 1))
+        trans.sim[4,2] <- length(which(transition4 == 2))
+        trans.sim[4,3] <- length(which(transition4 == 3))
+
+        trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+        for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
+
+
+        # compute cluster persistence:
+        my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector ordered as: day1year1member1, day2year1member1, ..., day1year2member1, day2year2member1, ...
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0 # day 1 itself is counted inside the loop below
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        n.dd <- length(my.cluster.vector)
+
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        pers1 <- mean(sequ1[which(sequ1 != 0)])
+        pers2 <- mean(sequ2[which(sequ2 != 0)])
+        pers3 <- mean(sequ3[which(sequ3 != 0)])
+        pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+        # compute correlations between simulated clusters and observed regime anomalies:
+        cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+        lat.forecast <- lat; lon.forecast <- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+        if(p.orig != p) p <- p.orig
+
+        # compute the observed transition probabilities:
+        n.days.month <- n.days.in.a.period(load.month,2001)
+        j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+        for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){
+          if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+            transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1]
+            j1o <- j1o + 1
+          }
+          if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+            transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1]
+            j2o <- j2o + 1
+          }
+          if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+            transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1]
+            j3o <- j3o + 1
+          }
+          if (my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+            transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1]
+            j4o <- j4o + 1
+          }
+
+        }
+
+        trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"), c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+        trans.obs[1,2] <- length(which(transition.obs1 == 2))
+        trans.obs[1,3] <- length(which(transition.obs1 == 3))
+        trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+        trans.obs[2,1] <- length(which(transition.obs2 == 1))
+        trans.obs[2,3] <- length(which(transition.obs2 == 3))
+        trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+        trans.obs[3,1] <- length(which(transition.obs3 == 1))
+        trans.obs[3,2] <- length(which(transition.obs3 == 2))
+        trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+        trans.obs[4,1] <- length(which(transition.obs4 == 1))
+        trans.obs[4,2] <- length(which(transition.obs4 == 2))
+        trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+        trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+        for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        for(y in year.start:year.end){
+          # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+          freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+        }
+
+        sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+        sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+        sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+        sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+        cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+        cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+        cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+        cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+        cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+        cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+        cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+        cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+        cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+        cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+        cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+        cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+        cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+        cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+        cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+        cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+        # associate each simulated cluster to one observed regime:
+        max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+        cluster.corr2 <- cluster.corr
+        cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+        cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+        max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+        cluster.corr3 <- cluster.corr2
+        cluster.corr3[max2[1],] <- -999
+        cluster.corr3[,max2[2]] <- -999
+        max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+        cluster.corr4 <- cluster.corr3
+        cluster.corr4[max3[1],] <- -999
+        cluster.corr4[,max3[2]] <- -999
+        max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+        seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+        seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+        cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+        cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+        cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+        cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+        cluster1.name <- regimes.obs[cluster1.regime]
+        cluster2.name <- regimes.obs[cluster2.regime]
+        cluster3.name <- regimes.obs[cluster3.regime]
+        cluster4.name <- regimes.obs[cluster4.regime]
+
+        regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+        cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs))
+
+        spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
+        spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+        spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+        spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+        # measure persistence for the reanalysis dataset:
+        my.cluster.vector <- my.cluster[[load.month]][[1]]
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0 # day 1 itself is counted inside the loop below
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] != 4) i4 <- 0
+
+        n.dd <- length(my.cluster.vector)
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        persObs1 <- mean(sequ1[which(sequ1 != 0)])
+        persObs2 <- mean(sequ2[which(sequ2 != 0)])
+        persObs3 <- mean(sequ3[which(sequ3 != 0)])
+        persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+        # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+        # forecast month: September
+        if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"}
+        if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+        # forecast month: October
+        if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+        if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+        if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+        # forecast month: November
+        if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+        regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # simulated regimes after the manual corrections
+        #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference
+
+        order.sim <- match(regimes.sim2, orden)
+        order.obs <- match(regimes.obs, orden)
+
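+        # A compact cross-check of the persistence and transition loops above (a sketch, not used by
+        # the script; 'persObs1.check' and 'trans.obs.check' are hypothetical names, and it assumes
+        # my.cluster[[load.month]]$cluster is the same daily series as my.cluster.vector). Like the
+        # loops, it treats the concatenated series as continuous, so spells and transitions crossing
+        # year boundaries are not split:
+        #
+        #   r <- rle(my.cluster.vector)                        # run lengths of consecutive equal clusters
+        #   persObs1.check <- mean(r$lengths[r$values == 1])   # mean spell length of cluster 1
+        #
+        #   from <- my.cluster.vector[-length(my.cluster.vector)]
+        #   to   <- my.cluster.vector[-1]
+        #   keep <- from != to                                 # keep only the days when the cluster changes
+        #   trans.obs.check <- prop.table(table(factor(from[keep], levels=1:4),
+        #                                       factor(to[keep], levels=1:4)), margin=1)
+        #   # (the diagonal is 0 here, whereas trans.obs keeps it as NA)
+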
+        clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+        clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+        # recompute the spatial correlations, because they might have changed due to the manual corrections above:
+        spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+        spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+        spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+        spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+        load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+        #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig, period)) period <- period.orig
+        if(p.orig != p) p <- p.orig
+        if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order
+        if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+      } # close if on 'forecast.name'
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+        if(save.names){
+          save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+        } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+          load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+          if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+          if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        }
+      }
+
+      # assign to each cluster its regime:
+      if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+        cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+      } else {
+        cluster1 <- which(orden == cluster1.name)
+        cluster2 <- which(orden == cluster2.name)
+        cluster3 <- which(orden == cluster3.name)
+        cluster4 <- which(orden == cluster4.name)
+      }
+
+      assign(paste0("map",cluster1), pslwr1mean)
+      assign(paste0("map",cluster2), pslwr2mean)
+      assign(paste0("map",cluster3), pslwr3mean)
+      assign(paste0("map",cluster4), pslwr4mean)
+
+      assign(paste0("fre",cluster1), wr1y)
+      assign(paste0("fre",cluster2), wr2y)
+      assign(paste0("fre",cluster3), wr3y)
+      assign(paste0("fre",cluster4), wr4y)
+
+      if(composition == "impact"){
+        assign(paste0("imp",cluster1), varwr1mean)
+        assign(paste0("imp",cluster2), varwr2mean)
+        assign(paste0("imp",cluster3), varwr3mean)
+        assign(paste0("imp",cluster4), varwr4mean)
+
+        if(fields.name == rean.name){
+          assign(paste0("sig",cluster1), pvalue1)
+          assign(paste0("sig",cluster2), pvalue2)
+          assign(paste0("sig",cluster3), pvalue3)
+          assign(paste0("sig",cluster4), pvalue4)
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        assign(paste0("freMax",cluster1), wr1yMax)
assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + 
my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + freq.max=100 + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 
1, 0.94, 0.98), new=TRUE)
+      if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""}
+      mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2)
+
+      # Centroid maps:
+      map.xpos <- 0
+      map.width <- 0.46
+      par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+      PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+      par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+      PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+      par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+      PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+      par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+      PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+      # Subtitle centroid maps:
+      map.title.xpos <- 0.76
+      map.title.width <- 0.2
+      par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+      mtext("year", cex=3)
+      par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+      mtext("year", cex=3)
+      par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+      mtext("year", cex=3)
+      par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+      mtext("year", cex=3)
+
+      # Title Centroid Maps:
+      title1.xpos <- 0.02
+      title1.width <- 0.4
+      par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+      mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+      par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+      mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+      par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+      mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+      par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+      mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+      # Impact maps:
+      if(composition == "impact"){ # never entered, because we are already inside an if on composition == "simple"
+        impact.xpos <- 0.47
+        impact.width <- 0.26
+        par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+        PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+        par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+        PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F,
dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), 
font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + 
+      ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+      if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+      # Legend 2:
+      if(fields.name == rean.name){
+        legend2.xpos <- 0.48
+        legend2.width <- 0.25
+        legend2.cex <- 1.8
+        my.subset2 <- match(values.to.plot2, my.brks.var)
+        par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+        ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+        mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+        par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+        ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+        mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+        par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+        ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+        mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+        par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+        ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+        mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+      }
+
+      if(!as.pdf) dev.off() # for saving one .png per period
+
+    } # close if on composition == 'simple'
+
+
+    # Visualize the composition with the regime anomalies for a selected forecast month:
+    if(composition == "psl"){
+      # Centroid maps:
+      n.map <- n.map + 1 # n.map starts from 0
+      map.width <- 0.115
+      map.xpos <- 0.06 + map.width * (n.map - 1)
+      map.ypos <- 0.08
+      map.height <- 0.19
+      map.ysep <- 0.015
+
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+      PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+      PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+      PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+      PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+      # Sheet subtitles:
+      par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+      if(n.map < 8) {
+        mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+      } else{
+        mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+      }
+
+    } # close if on composition == 'psl'
+
+    # Visualize the composition with the impact maps for a selected forecast month:
+    if(composition == "impact"){
+      # Impact maps:
+      n.map <- n.map + 1 # n.map starts from 0
+      map.width <- 0.115
+      map.xpos <- 0.06 + map.width * (n.map - 1)
+      map.ypos <- 0.08
+      map.height <- 0.19
+      map.ysep <- 0.015
+
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+      PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+      PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+      PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+      PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+      # Sheet subtitles:
+      par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+      if(n.map < 8) {
+        mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+      } else{
+        mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+      }
+
+    } # close if on composition == 'impact'
+
+
+    # Visualize the composition with the interannual frequencies for a selected forecast month:
+    if(composition == "fre"){
+      # Frequency barplots:
+      n.map <- n.map + 1 # n.map starts from 0
+      map.width <- 0.115
+      map.xpos <- 0.06 + map.width * (n.map - 1)
+      map.ypos <- 0.08
+      map.height <- 0.19
+      map.ysep <- 0.015
+
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+      if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+      if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+      par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+      if(n.map < 8) if(fields.name ==
forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + 
mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { + save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + +} # close if on composition != "summary" + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.sp.cor <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 
+  for(p in 1:12){
+    p.orig <- p
+
+    for(l in 0:6){
+
+      load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+      array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+      array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+      array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+      array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+      array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+      array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+      array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+      array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+      array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+      array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+      array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+      array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+      array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+      array.pers[p,1+l, 1] <- persist1 # NAO+
+      array.pers[p,1+l, 3] <- persist2 # NAO-
+      array.pers[p,1+l, 4] <- persist3 # Blocking
+      array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+      array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+      array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+      array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+      array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+      array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+      array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+      array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+      array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+      array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+      array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+      array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+      array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+      array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+      array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+      array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to Blocking
+      array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+      array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+      array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+      array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to Blocking
+      array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+      array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from Blocking to NAO+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to Blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to Blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from Blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from Blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from Blocking to Blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from Blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.Ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.Ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.Ridge to Blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to Blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to Blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from Blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from Blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from Blocking to Blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from Blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.Ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.Ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.Ridge to Blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+ # Spatial Corr summary:
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400)
+
+ # create an array similar to array.sp.cor but with colors instead of corr. values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l),
col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- 
+ rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- seq(-1,1,0.25) # breaks of the correlation color bars below (without this, the stale my.seq of the spatial-correlation section would be reused)
+
+ array.cor.colors <- array(corr.cols[8],c(12,7,4))
+ array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+ array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+ array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+ array.cor.colors[array.cor < 0] <- corr.cols[4]
+ array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+ array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+ array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2,
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", 
ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
+ 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # Delta freq. summary:
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ # NOTE: the code from here down to the NAO+ panel of the "_1table" figure was
+ # garbled in this copy; it is reconstructed by analogy with the "Freq. summary"
+ # block above (the figure titles are assumed, and a "_4tables" variant, if it
+ # existed here, could not be recovered):
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
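+ # The eight-step "<" cascades used to colour these summary arrays can also be
+ # written as one vectorised lookup. A minimal sketch (illustrative only, not
+ # used below; note that findInterval() propagates NA, whereas the cascades
+ # leave NA cells at the default colour, so NA cells would still need explicit
+ # recolouring, as done for the transition arrays):
+ color.of <- function(x, brks, cols) {
+   # findInterval() maps each value to its bin between the interior breaks;
+   # adding 1 turns bins 0..(n-1) into colour indices 1..n
+   array(cols[findInterval(x, brks[2:(length(brks)-1)]) + 1], dim(x))
+ }
+ # e.g. color.of(array.diff.freq, my.seq, diff.freq.cols) reproduces
+ # array.diff.freq.colors for all non-NA cells
+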
+ # Persistence summary:
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+ # create an array similar to array.pers but with colors instead of persistence values:
+ pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+ array.pers.colors <- array(pers.cols[8],c(12,7,4))
+ array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+ array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+ array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+ array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+ array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+ array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+ array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("S4 Regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
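+ # Each cell of the triangle plots above is composed of four polygon() calls.
+ # A minimal sketch of a helper that bundles them (illustrative only; the
+ # blocks below keep the original inlined calls). The triangle slots are
+ # 1 = left, 2 = bottom, 3 = right, 4 = top, matching the colour arrays:
+ draw.cell <- function(p, l, cols4) {
+   polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1]) # left triangle
+   polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2]) # bottom triangle
+   polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3]) # right triangle
+   polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4]) # top triangle
+ }
+ # e.g. for(p in 1:12) for(l in 0:6) draw.cell(p, l, array.pers.colors[p, 1+l, ])
+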
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = 
my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- 
trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + # distinct filename for this 4-panel layout: the 3-panel table further below also writes to "_summary_trans_NAO+_1table.png" and would otherwise overwrite it + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_1table_4panels.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = 
"Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
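+ # The eight chained "< my.seq[k]" assignments used above (and repeated for every
+ # array.trans.sim* / array.trans.diff* object in this section) all implement the same
+ # value-to-colour binning; a minimal sketch of an equivalent helper (hypothetical
+ # name value.to.color, not part of the original script), based on findInterval():
+ value.to.color <- function(x, breaks, cols){
+    out <- array(cols[findInterval(x, breaks, all.inside=TRUE)], dim=dim(x))
+    out[is.na(x)] <- "white"  # NA cells are painted white, as in the ladders above
+    out
+ }
+ # e.g., the block above is equivalent to:
+ # array.trans.diff1.colors <- value.to.color(array.trans.diff1, my.seq, trans.cols)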
png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
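+ # In these "_1table" layouts the y axis is the forecast month and the x axis the lead
+ # time, so each cell must be traced back to the startdate that issued the forecast;
+ # the i2=ifelse(...) expression in the loops below does that wrap-around by hand.
+ # A sketch of the same re-indexing with modular arithmetic (hypothetical helper;
+ # here lead = 6-j months):
+ startdate.index <- function(month, lead) ((month - lead - 1) %% 12) + 1
+ # e.g., startdate.index(2, 6) is 8: a February forecast at 6 months lead
+ # was started the previous August.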
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
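+ # Each cell of the single-panel summaries above (the "_summary_trans_*.png" and
+ # "_summary_bias_trans_*.png" plots) is split into four triangles, one per target
+ # regime; a sketch of that geometry as a hypothetical helper, drawing the cell
+ # centred at (p, l) with cols4 holding the left/bottom/right/top colours:
+ draw.cell.triangles <- function(p, l, cols4){
+    polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1])  # left triangle
+    polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2])  # bottom triangle
+    polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3])  # right triangle
+    polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4])  # top triangle
+ }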
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 
0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() +
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
+ + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only + lon.max <- 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in the future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + ## # most influential WT with positive impact: + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + # most influential WT with negative impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + +
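# Editor's note: a minimal vectorized sketch of the "most negative impact" selection
# performed by the nested i/j loop above, assuming imp1..imp4 are numeric matrices of
# identical dimensions (as built with assign() above). Caveat: which.min() keeps the
# first regime on exact ties, whereas the chain of <= tests above keeps the last one:
imp.stack.check <- array(c(imp1, imp2, imp3, imp4), dim = c(dim(imp1), 4))
imp.all.check <- apply(imp.stack.check, c(1, 2), which.min) + 0.5  # same 1.5..4.5 coding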
par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact" + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v15.R~ b/old/weather_regimes_maps_v15.R~ new file mode 100644 index 0000000000000000000000000000000000000000..ff461c1e664088ebd38169ab1646d8745918d769 --- /dev/null +++ b/old/weather_regimes_maps_v15.R~ @@ -0,0 +1,3530 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g., wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to visually find the order in which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# working dir with the input files with '_psl.RData' suffix: +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps. 1: sfcWind, 2: tas + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have previously created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period.
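# Editor's note: 'period' indexes the my.period vector defined in Rfunctions.R; a hedged
# reconstruction of its assumed layout, for reference only (not used by the script):
my.period.sketch <- c(month.name, "Winter", "Spring", "Summer", "Autumn", "Year")
# my.period.sketch[13:17] gives the four seasons and the whole year.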
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and visually find the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# After that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e., the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "summary" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, frequency bias, correlations, etc.) [You need all ClusterNames files] + # - 'impact' for all the impact plots of the regime with the highest impact + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + +forecast.month <- 11 # in case composition = 'psl' or 'fre' or 'impact', choose a forecast month from 1 to 12 to plot its composition + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change!
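# Editor's note: a minimal sketch of what the cluster -> regime ordering further below
# amounts to: each cluster name is looked up in 'orden' to find its display row, which
# is what the which(orden == clusterX.name) calls compute one by one:
cluster.names.sketch <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
order.sketch <- match(cluster.names.sketch, orden)  # display row of clusters 1..4, top to bottom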
+fields.name.orig <- fields.name +period.orig <- period +workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact"){ + + if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + } + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for debugging + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + for(p in period){ + #p <- period[1] # for debugging + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + + #if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + #if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <-
dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){ + if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1] + j3o <- j3o + 1 + } + if
(my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + 
if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + 
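# Editor's note: a compact cross-check sketch of the persistence and transition
# statistics computed above for the observed series my.cluster.vector (the simulated
# loops differ in that they never link days across years or members):
spells.check <- rle(my.cluster.vector)
pers.check <- tapply(spells.check$lengths, spells.check$values, mean)  # mean spell length per cluster
from.check <- head(spells.check$values, -1)  # regime before each change of spell
to.check <- tail(spells.check$values, -1)    # regime after it
trans.check <- prop.table(table(from.check, to.check), margin = 1)  # rows mirror trans.obs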
load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + } # close if on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(composition == "impact"){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <-
c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + 
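# Editor's note: a minimal sketch (hypothetical values) of how an anomaly is matched to
# its colour bin; the palette created just below is assumed to contain
# length(my.brks.var) - 1 colours, one per interval between consecutive breaks:
demo.anom <- c(-4.2, 0.3, 2.7)
demo.bin <- findInterval(demo.anom, my.brks.var, all.inside = TRUE)  # bin index of each value
# my.cols.var[demo.bin] then gives the colour each anomaly is drawn with.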
#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + freq.max=100 + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + +# Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + 
PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(composition == "impact"){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), 
font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
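# Editor's note: as the comment above points out, paste0() is not evaluated inside a
# plotmath expression; the mean-frequency labels below therefore build the string first
# and splice it in with bquote()/.(). A minimal illustration (hypothetical value, label
# built but not drawn):
demo.lbl <- bquote(bar(nu) == .(paste0(12.3, "%")))  # .() runs paste0 before rendering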
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == 
"fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), 
new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax 
fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + +} # close if on composition != "summary" + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # arrays storing the summary scores in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary because the loaded .RData can overwrite them) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig # the loop index can also be overwritten by load() + + array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + }
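+ # The summary blocks that follow all turn a score array into a colour array with a
+ # cascade of '<' comparisons against successive breaks. For finite, sorted breaks the
+ # same binning can be done in one step with findInterval(); a minimal sketch of the
+ # equivalence (wrapped in if(FALSE) so it is never executed; 'vals', 'brks' and 'cols'
+ # are illustrative stand-ins, not variables of this script):
+ if(FALSE){
+   vals <- array(runif(12*7*4, -1, 1), c(12,7,4))      # stand-in for e.g. array.sp.cor
+   brks <- c(-1, 0, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)   # 9 breaks -> 8 colour classes
+   cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+   class.idx <- findInterval(vals, brks[2:8]) + 1      # class 1..8 for each cell
+   col.array <- array(cols[class.idx], dim(vals))      # same result as the '<' cascade
+ }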
# Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.sp.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] +
array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", 
cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, 
cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) # use the same breaks as the colour binning above (my.seq still holds the spatial-correlation breaks here) + + dev.off()
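+ # In the *_1table layouts the y axis is the forecast month i rather than the startdate,
+ # so each cell has to be read back from the startdate-indexed arrays: forecast month i
+ # at lead time 6-j comes from startdate i2 = i-(6-j), wrapped back into 1..12. The
+ # ifelse() in the loops does this wrap explicitly; a quick check of an equivalent
+ # modular form (illustrative only, not used by the script):
+ if(FALSE){
+   i <- 1; j <- 0                                 # forecast month January seen at lead time 6
+   c(ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12),    # -> 7: the July startdate
+     ((i - (6-j) - 1) %% 12) + 1)                 # -> 7 as well, same mapping
+ }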
png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) # same breaks as the binning of array.cor.colors above + + dev.off() + + + + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) +
par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of persistence values: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] +
array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550,
height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + 
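+
+ # Side note: the threshold loop in bin.colors() above is equivalent to a single
+ # base-R findInterval() lookup; a minimal sketch of that alternative is given below
+ # (bin.colors.fi is a hypothetical name and is not used by the calls in this script):
+ bin.colors.fi <- function(x, brks, cols, na.col="white"){
+   idx <- findInterval(x, brks[2:length(cols)]) + 1  # bin index: x in [brks[k], brks[k+1]) -> color k
+   out <- array(cols[idx], dim=dim(x))               # same shape as x, colors instead of values
+   out[is.na(x)] <- na.col                           # missing values are drawn in white
+   out
+ }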
+ # St.Dev.freq ratio summary:
+ diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 2)
+ array.diff.sd.freq.colors <- bin.colors(array.diff.sd.freq, my.seq, diff.sd.freq.cols)
+
+ plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq.png"), "St.Dev. ratio between sim. and obs. average frequencies", array.diff.sd.freq.colors, my.seq, diff.sd.freq.cols)
+ plot.summary.4tables(paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), "St.Dev. ratio between sim. and obs. average frequencies", array.diff.sd.freq.colors, my.seq, diff.sd.freq.cols)
+ plot.summary.1table(paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), "St.Dev. ratio between sim. and obs. average frequencies", array.diff.sd.freq.colors, my.seq, diff.sd.freq.cols, panels=c("NAO+","NAO-","Blocking","Atlantic Ridge"), slices=c(1,3,4,2))
+
+ # Transition probability summaries: the same color scale (in %) is shared by the
+ # four source regimes; cells with no transition data are drawn in white:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0, 10, 20, 30, 40, 50, 60, 70, 100)
+
+ # Transition probability from NAO+ to X summary:
+ array.trans.sim1.colors <- bin.colors(array.trans.sim1, my.seq, trans.cols, na.col="white")
+ plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_trans_NAO+.png"), "% Transition from NAO+ regime", array.trans.sim1.colors, my.seq, trans.cols)
+ plot.summary.4tables(paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), "% Transition from NAO+ regime", array.trans.sim1.colors, my.seq, trans.cols)
+ # warning: the next two calls write to the same file, so the 4-panel table is
+ # immediately overwritten by the 3-panel one:
+ plot.summary.1table(paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), "% Transition from NAO+ regime", array.trans.sim1.colors, my.seq, trans.cols, panels=c("NAO+","NAO-","Blocking","Atlantic Ridge"), slices=c(1,3,4,2))
+ plot.summary.1table(paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), "% Transition from NAO+ regime", array.trans.sim1.colors, my.seq, trans.cols, panels=c("NAO-","Blocking","Atlantic Ridge"), slices=c(3,4,2))
+
+ # Transition probability from NAO- to X summary:
+ array.trans.sim2.colors <- bin.colors(array.trans.sim2, my.seq, trans.cols, na.col="white")
+ plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_trans_NAO-.png"), "% Transition from NAO- regime", array.trans.sim2.colors, my.seq, trans.cols)
+ plot.summary.4tables(paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), "% Transition from NAO- regime", array.trans.sim2.colors, my.seq, trans.cols)
+ plot.summary.1table(paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), "% Transition from NAO- regime", array.trans.sim2.colors, my.seq, trans.cols, panels=c("NAO+","Blocking","Atlantic Ridge"), slices=c(1,4,2))
+
+ # Transition probability from blocking to X summary:
+ array.trans.sim3.colors <- bin.colors(array.trans.sim3, my.seq, trans.cols, na.col="white")
+ plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_trans_blocking.png"), "% Transition from blocking regime", array.trans.sim3.colors, my.seq, trans.cols)
+ plot.summary.4tables(paste0(workdir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), "% Transition from blocking regime", array.trans.sim3.colors, my.seq, trans.cols)
+ plot.summary.1table(paste0(workdir,"/",forecast.name,"_summary_trans_blocking_1table.png"), "% Transition from blocking regime", array.trans.sim3.colors, my.seq, trans.cols, panels=c("NAO+","NAO-","Atlantic Ridge"), slices=c(1,3,2))
+
+ # Transition probability from Atlantic ridge to X summary:
+ array.trans.sim4.colors <- bin.colors(array.trans.sim4, my.seq, trans.cols, na.col="white")
+ plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), "% Transition from Atlantic Ridge regime", array.trans.sim4.colors, my.seq, trans.cols)
+ plot.summary.4tables(paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), "% Transition from Atlantic Ridge regime", array.trans.sim4.colors, my.seq, trans.cols)
+ plot.summary.1table(paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), "% Transition from Atlantic Ridge regime", array.trans.sim4.colors, my.seq, trans.cols, panels=c("NAO+","NAO-","Blocking"), slices=c(1,3,4))
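+
+ # The four "% Transition from ..." blocks above share the same recipe; a compact,
+ # purely illustrative driver is sketched below, guarded with if(FALSE) so that the
+ # files are not drawn twice (the suffix/array pairing is taken from the calls above):
+ if(FALSE){
+   trans.colors <- list("NAO+" = array.trans.sim1.colors, "NAO-" = array.trans.sim2.colors,
+                        "blocking" = array.trans.sim3.colors, "Atlantic_ridge" = array.trans.sim4.colors)
+   for(nm in names(trans.colors)){
+     plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_trans_",nm,".png"),
+                            paste0("% Transition from ", gsub("_"," ",nm), " regime"),
+                            trans.colors[[nm]], my.seq, trans.cols)
+   }
+ }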
+ # Bias of the Transition probability from NAO+ to X summary:
+ my.seq <- c(-20, -10, -5, -2, 0, 2, 5, 10, 20)
+ array.trans.diff1.colors <- bin.colors(array.trans.diff1, my.seq, trans.cols, na.col="white")
+ plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), "% Bias transition from NAO+ regime", array.trans.diff1.colors, my.seq, trans.cols)
+ plot.summary.4tables(paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), "% Bias transition from NAO+ regime", array.trans.diff1.colors, my.seq, trans.cols)
+ plot.summary.1table(paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), "% Bias transition from NAO+ regime", array.trans.diff1.colors, my.seq, trans.cols, panels=c("NAO+","NAO-","Blocking","Atlantic Ridge"), slices=c(1,3,4,2))
+
+ # Bias of the Transition probability from NAO- to X summary:
+ array.trans.diff2.colors <- bin.colors(array.trans.diff2, my.seq, trans.cols, na.col="white")
+ plot.summary.triangles(paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), "% Bias transition from NAO- regime", array.trans.diff2.colors, my.seq, trans.cols)
+ plot.summary.4tables(paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), "% Bias transition from NAO- regime", array.trans.diff2.colors, my.seq, trans.cols)
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2,
labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 
0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300)
+ plot.new()
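+ ## Note: the eight threshold assignments that build each array.trans.diffN.colors
+ ## array above are repeated verbatim four times. A compact equivalent (only a
+ ## sketch, not used by this script; 'bias2col' is a hypothetical helper) exploits
+ ## the fact that the colour index is 1 plus the number of interior breaks <= x:
+ ## bias2col <- function(x, brks = my.seq, cols = trans.cols){
+ ##   out <- array(cols[findInterval(x, brks[2:(length(brks)-1)]) + 1], dim(x))
+ ##   out[is.na(x)] <- "white" # NA cells are drawn in white, as above
+ ##   out
+ ## }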
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
+
+ # position of longitude values of Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ lon.max = 45
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+ cluster2 <- which(orden == cluster2.name.period[p])
+ cluster3 <- which(orden == cluster3.name.period[p])
+ cluster4 <- which(orden == cluster4.name.period[p])
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ ## imp.all <- imp1
+ ## for(i in 1:dim(imp1)[1]){
+ ## for(j in 1:dim(imp1)[2]){
+ ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+ ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+ ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+ ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+ ## }
+ ## }
+
+ ## # most influential WT with positive impact:
+ ## imp.all <- imp1
+ ## for(i in 1:dim(imp1)[1]){
+ ## for(j in 1:dim(imp1)[2]){
+ ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ ## }
+ ## }
+
+ # most influential WT with negative impact:
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+
+ if(p<=3) yMod <- 0.7
+ if(p>=4 && p<=6) yMod <- 0.5
+ if(p>=7 && p<=9) yMod <- 0.3
+ if(p>=10) yMod <- 0.1
+ if(p==13 || p==14) yMod <- 0.52
+ if(p==15 || p==16) yMod <- 0.1
+
+ if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+ if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+ if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+ if(p==13 || p==15) xMod <- 0.05
+ if(p==14 || p==16) xMod <- 0.50
+
+ # impact map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+ if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+ PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+ # title map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+ if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+ mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
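+ ## A vectorized alternative to the element-wise selection loop above (only a
+ ## sketch, not run here; 'imp.stack' is a hypothetical name). Up to tie-breaking,
+ ## stacking the four impact fields and applying which.min() along the regime
+ ## dimension gives the same 1.5/2.5/3.5/4.5 codes:
+ ## imp.stack <- array(c(imp1, imp2, imp3, imp4), c(dim(imp1), 4))
+ ## imp.all <- apply(imp.stack, c(1,2), which.min) + 0.5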
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v16.R b/old/weather_regimes_maps_v16.R
new file mode 100644
index 0000000000000000000000000000000000000000..4dfe0d731642d8c32e9e691e1163eaa4942c1887
--- /dev/null
+++ b/old/weather_regimes_maps_v16.R
@@ -0,0 +1,3582 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, you may find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, change 'ordering'
+# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set
+# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis
+# to the forecast system, employing it to automatically assign to each simulated cluster one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
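+#
+# A minimal sketch of that association step (assuming a 4x4 matrix 'cluster.corr' of
+# spatial correlations between the simulated clusters, on the rows, and the observed
+# regimes, on the columns, as built later in this script): repeatedly pick the global
+# maximum and discard its row and column, so each cluster is paired with a distinct regime:
+#
+# cc <- cluster.corr; pairs <- matrix(NA, 4, 2)
+# for(k in 1:4){
+#   m <- which(cc == max(cc), arr.ind = TRUE)[1,] # best remaining (cluster, regime) pair
+#   pairs[k,] <- m
+#   cc[m[1],] <- -999 # exclude this cluster from further matching
+#   cc[,m[2]] <- -999 # exclude this regime from further matching
+# }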
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# working dir with the input files with '_psl.RData' suffix:
+#workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+#workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- rean.name #forecast.name # choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps. 1: sfcWind, 2: tas
+
+period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+               # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand.
+
+# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then,
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography.
+# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order:
+ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- T # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "simple" # choose which kind of composition you want to plot:
+                        # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                        # - 'psl' for all the regime anomalies for a fixed forecast month
+                        # - 'fre' for all the interannual frequencies for a fixed forecast month
+                        # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [you need all the ClusterNames files]
+                        # - 'impact' for the impact plots of the regime with the highest impact
+                        # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+
+# Associate the regimes with the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+# if fields.name='forecast.name', you have to specify these additional variables:
+
+# set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster"
+
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot
+                   # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+n.map <- 0
+
+if(composition != "summary" && composition != "impact"){
+
+ if(composition == "psl" || composition == "fre") {
+ if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+ if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+ lead.months <- c(6:0,0)
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+ if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+ }
+
+ for(lead.month in lead.months){
+ #lead.month=lead.months[1] # for the debug
+
+ if(composition == "simple"){
+ if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+ if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+ }
+
+ if(composition == 'psl' || composition == 'fre') {
+ period <- forecast.month - lead.month
+ if(period < 1) period <- period + 12
+ }
+
+ for(p in period){
+ #p <- period[1] # for the debug
+ p.orig <- p
+
+ # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+ if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) {
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+ # load impact data only if it is available, if not it will leave an empty space in the composition:
+ if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ } else {
+ print("Impact data not available; 'simple' compositions will have an empty space in the middle column")
+ }
+ }
+
+ if(fields.name == forecast.name && n.map < 7){
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+ if(composition == "impact") load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data, for impact maps
+
+ #if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ #if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ }
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ if(fields.name == forecast.name && n.map < 7){
+
+ # compute the simulated interannual monthly variability:
+ n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
+ freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+ freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+ freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+ freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+ sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+ sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+ sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+ sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
+ # compute the transition probabilities:
+ j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+ n.members <- dim(my.cluster.array[[p]])[3]
+
+ for(m in 1:n.members){
+ for(y in 1:n.years){
+ for (d in 1:(n.days.month-1)){
+
+ if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+ transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+ j1 <- j1 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+ transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+ j2 <- j2 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+ transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+ j3 <- j3 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+ transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+ j4 <- j4 + 1
+ }
+
+ }
+ }
+ }
+
+ trans.sim <- matrix(NA, 4, 4, dimnames = list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+ trans.sim[1,2] <- length(which(transition1 == 2))
+ trans.sim[1,3] <- length(which(transition1 == 3))
+ trans.sim[1,4] <- length(which(transition1 == 4))
+
+ trans.sim[2,1] <- length(which(transition2 == 1))
+ trans.sim[2,3] <- length(which(transition2 == 3))
+ trans.sim[2,4] <- length(which(transition2 == 4))
+
+ trans.sim[3,1] <- length(which(transition3 == 1))
+ trans.sim[3,2] <- length(which(transition3 == 2))
+ trans.sim[3,4] <- length(which(transition3 == 4))
+
+ trans.sim[4,1] <- length(which(transition4 == 1))
+ trans.sim[4,2] <- length(which(transition4 == 2))
+ trans.sim[4,3] <- length(which(transition4 == 3))
+
+ trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+ for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
+
+
+ # compute cluster persistence:
+ my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1year1member1, day2year1member1, ..., day1year2member1, day2year2member1, ...
+
+ sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+ i1 <- i2 <- i3 <- i4 <- 1
+
+ if(my.cluster.vector[1] != 1) i1 <- 0
+ if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[1] != 2) i2 <- 0
+ if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[1] != 3) i3 <- 0
+ if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[1] != 4) i4 <- 0
+ if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+ n.dd <- length(my.cluster.vector)
+
+ for(d in 1:(n.dd-1)){
+ if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+ if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+ if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+ if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+ if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+ if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+ if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+ if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+ }
+
+ if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ pers1 <- mean(sequ1[which(sequ1 != 0)])
+ pers2 <- mean(sequ2[which(sequ2 != 0)])
+ pers3 <- mean(sequ3[which(sequ3 != 0)])
+ pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+ # compute correlations between simulated clusters and observed regimes' anomalies:
+ cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+ lat.forecast <- lat; lon.forecast <- lon
+
+ if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig,period)) period <- period.orig
+ if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ # compute the observed transition probabilities:
+ n.days.month <- n.days.in.a.period(load.month,2001)
+ j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+ for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){
+ if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j1o <- j1o + 1
+ }
+ if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j2o <- j2o + 1
+ }
+ if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j3o <- j3o + 1
+ }
+ if (my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j4o <- j4o + 1
+ }
+
+ }
+
+ trans.obs <- matrix(NA, 4, 4, dimnames = list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+ trans.obs[1,2] <- length(which(transition.obs1 == 2))
+ trans.obs[1,3] <- length(which(transition.obs1 == 3))
+ trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+ trans.obs[2,1] <- length(which(transition.obs2 == 1))
+ trans.obs[2,3] <- length(which(transition.obs2 == 3))
+ trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+ trans.obs[3,1] <- length(which(transition.obs3 == 1))
+ trans.obs[3,2] <- length(which(transition.obs3 == 2))
+ trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+ trans.obs[4,1] <- length(which(transition.obs4 == 1))
+ trans.obs[4,2] <- length(which(transition.obs4 == 2))
+ trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+ trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+ for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+ # compute the observed interannual monthly variability:
+ freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+ freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+ freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+ freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+ }
+
+ sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+ sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+ sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+ sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+ wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+ if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order than the reanalysis's")}
+ if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+ if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+ cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+ cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+ cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+ cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+ cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+ cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + 
+ if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ persObs1 <- mean(sequ1[which(sequ1 != 0)])
+ persObs2 <- mean(sequ2[which(sequ2 != 0)])
+ persObs3 <- mean(sequ3[which(sequ3 != 0)])
+ persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+ # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+ # forecast month: September
+ if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+ if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"}
+ if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+ # forecast month: October
+ if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+ if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+ if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+ if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+ if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+ # forecast month: November
+ if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+ if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+ if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+ regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction
+ #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference
+
+ order.sim <- match(regimes.sim2, orden)
+ order.obs <- match(regimes.obs, orden)
+
+ clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+ clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+ # recompute the spatial correlations, since they might have changed after the manual corrections above:
+ spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+ spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+ spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+ spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
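+ ## The run-length counting above (used for both the simulated and the observed
+ ## persistence) can also be written with rle(); only a sketch under the same
+ ## assumptions (a flat, NA-free vector of daily cluster labels):
+ ## runs <- rle(my.cluster.vector)
+ ## persObs1 <- mean(runs$lengths[runs$values == 1]) # and likewise for clusters 2 to 4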
load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(composition == "impact"){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3,
cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var 
#c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + freq.max=100 + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) 
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # x-axis labels ('year') of the frequency plots: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(composition == "impact"){ # never reached: this block is inside the composition == "simple" branch, kept only for reference + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935,
0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
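+ # (plotmath note: expression() does not evaluate its arguments, so paste0() inside it would be printed literally; the bquote(bar(nu) == .(...)) calls further below work because bquote() evaluates whatever is wrapped in .() before building the expression)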
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # close the png with the composition of the 4 regimes + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecast month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition with the impact maps for a selected forecast month: + if(composition == "impact"){ + # Impact maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + + # Visualize the composition with the interannual frequencies for a selected forecast month: + if(composition == "fre"){ + # Frequency plots: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2,
freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, 
etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { + save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + +} # close if on composition != "summary" + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.sp.cor <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load 
cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading the regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to Blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to Blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from Blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from Blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from Blocking to Blocking + 
array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from Blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.Ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.Ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.Ridge to Blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to Blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to Blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from Blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from Blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from Blocking to Blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from Blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.Ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.Ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.Ridge to Blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.sp.cor but with colors instead of corr. values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1,
1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), 
fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- 
corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 
0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, 
at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 
2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", 
ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + # Delta freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off()
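# Aside, as a sketch (the helper name is illustrative and not used elsewhere in the script): the one-table loops above recover the startdate month of each (forecast month, lead time) cell through the wrap-around i2 = ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12); the same mapping written with modular arithmetic: + month.of.startdate <- function(forecast.month, lead) ((forecast.month - lead - 1) %% 12) + 1 + # e.g. month.of.startdate(2, 5) is 9: the forecast verifying in February at a 5-month lead time was started in September.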
# Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of persistence values: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off()
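# Aside, as a sketch (helper name illustrative, not used elsewhere): the four polygon() calls above split each cell into left/bottom/right/top triangles; following the panel titles of the 4-table plots, the array slices are 1=NAO+, 2=Atlantic Ridge, 3=NAO-, 4=Blocking. One cell could equivalently be drawn as: + draw.cell <- function(p, l, cols4) { polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1]); polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2]); polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3]); polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4]) } + # usage: for(p in 1:12) for(l in 0:6) draw.cell(p, l, array.pers.colors[p, 1+l, ])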
png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line 
= 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + 
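# Aside, as a sketch (helper name illustrative): the seven cascaded "<" assignments below bin each value into one of the eight colours; base R's findInterval() expresses the same binning in one call, with a value falling exactly on a break taking the colour of the bin above it, as in the cascade, and NA values yielding an NA colour: + bin.colors <- function(x, brks, cols) array(cols[findInterval(x, brks) + 1], dim(x)) + # e.g. bin.colors(array.diff.sd.freq, my.seq[2:8], diff.sd.freq.cols) reproduces the array built below.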
array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- 
trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off()
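# Aside, as a sketch (assuming all four array.trans.simX arrays exist at this point in the script): the four transition summaries share trans.cols and my.seq, so their colour arrays could be built in one pass rather than four copied cascades: + trans.colors.list <- lapply(list(array.trans.sim1, array.trans.sim2, array.trans.sim3, array.trans.sim4), function(x) { cc <- array(trans.cols[findInterval(x, my.seq[2:8]) + 1], dim(x)); cc[is.na(x)] <- "white"; cc }) + # trans.colors.list[[2]], for example, matches array.trans.sim2.colors as built in the next section.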
# Transition probability from NAO- to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.trans.sim2 but with colors instead of transition probabilities: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = 
"Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 
0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + 
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1: sfcWind, 2: tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # NB: the cluster names should not depend on var.name! 
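+ # Editor's note on the 'EU' subset built just below: on the 0-360 degree grid of the reanalysis,
+ # continental Europe covers two disjoint longitude ranges, 0 to lon.max = 45E plus 337E to 360
+ # (i.e. 23W to 0), so the two index ranges are concatenated to wrap around the Greenwich meridian.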
+ + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + ## #most influent WT with positive impact: + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + # most influent WT with negative impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + 
par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact" + + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v16.R~ b/old/weather_regimes_maps_v16.R~ new file mode 100644 index 0000000000000000000000000000000000000000..6b8a1f16d63e16d54641ec53e4a638e29a193647 --- /dev/null +++ b/old/weather_regimes_maps_v16.R~ @@ -0,0 +1,3592 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you only have to run this script to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# working dir with the input files with '_psl.RData' suffix: +#workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name #forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 16 #1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). 
+ # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand. +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "simple" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files] + # - 'impact' for all the impact plot of the regime with the highest impact + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a forecast month from 1 to 12 to plot its composition + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change! 
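+# Editor's sketch (hypothetical, not called anywhere in this script): with the cluster names
+# set above, the reordering performed when 'ordering = TRUE' amounts to looking up each display
+# position of 'orden' among the cluster labels:
+## cluster.names <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+## match(orden, cluster.names) # gives 1 2 4 3 here: cluster 4 holds "Blocking", cluster 3 "Atl.Ridge"
+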
+fields.name.orig <- fields.name +period.orig <- period +workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact"){ + + if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + } + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month':: + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # ripristinate original values of this script + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + if(composition == "impact") load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data # for impact maps + + #if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + #if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated 
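+
+    # illustrative sketch, equivalent to the four parallel statements above but looping
+    # over the regime index; 'freq.sim.list' and 'sd.freq.sim.all' are new names added
+    # only for clarity and are not used elsewhere in this script:
+    freq.sim.list <- lapply(1:4, function(r) apply(my.cluster.array[[p]] == r, c(2,3), sum))
+    sd.freq.sim.all <- sapply(freq.sim.list, sd) / n.days.month
+    # freq.sim.list[[1]] is identical to freq.sim1, sd.freq.sim.all[1] to sd.freq.sim1, etc.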
+
+    # compute the transition probabilities:
+    j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+    n.members <- dim(my.cluster.array[[p]])[3]
+
+    for(m in 1:n.members){
+     for(y in 1:n.years){
+      for (d in 1:(n.days.month-1)){
+
+       if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+        transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+        j1 <- j1 + 1
+       }
+       if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+        transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+        j2 <- j2 + 1
+       }
+       if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+        transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+        j3 <- j3 + 1
+       }
+       if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){
+        transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+        j4 <- j4 + 1
+       }
+
+      }
+     }
+    }
+
+    trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+    trans.sim[1,2] <- length(which(transition1 == 2))
+    trans.sim[1,3] <- length(which(transition1 == 3))
+    trans.sim[1,4] <- length(which(transition1 == 4))
+
+    trans.sim[2,1] <- length(which(transition2 == 1))
+    trans.sim[2,3] <- length(which(transition2 == 3))
+    trans.sim[2,4] <- length(which(transition2 == 4))
+
+    trans.sim[3,1] <- length(which(transition3 == 1))
+    trans.sim[3,2] <- length(which(transition3 == 2))
+    trans.sim[3,4] <- length(which(transition3 == 4))
+
+    trans.sim[4,1] <- length(which(transition4 == 1))
+    trans.sim[4,2] <- length(which(transition4 == 2))
+    trans.sim[4,3] <- length(which(transition4 == 3))
+
+    trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+    for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
+
+
+    # compute cluster persistence:
+    my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1year1member1, day2year1member1, ..., day1year2member1, day2year2member1, ...
+
+    sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+    i1 <- i2 <- i3 <- i4 <- 1
+
+    if(my.cluster.vector[1] != 1) i1 <- 0
+    if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[1] != 2) i2 <- 0
+    if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[1] != 3) i3 <- 0
+    if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[1] != 4) i4 <- 0
+    if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+    n.dd <- length(my.cluster.vector)
+
+    for(d in 1:(n.dd-1)){
+     if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+     if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+     if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+     if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+     if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+     if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+     if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+     if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+    }
+
+    if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    pers1 <- mean(sequ1[which(sequ1 != 0)])
+    pers2 <- mean(sequ2[which(sequ2 != 0)])
+    pers3 <- mean(sequ3[which(sequ3 != 0)])
+    pers4 <- mean(sequ4[which(sequ4 != 0)])
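+
+    # illustrative sketch: the same mean persistences can be obtained with rle(), which
+    # splits my.cluster.vector into runs of consecutive equal values ('runs' and
+    # 'pers.rle' are new names, not used elsewhere in this script):
+    runs <- rle(my.cluster.vector)
+    pers.rle <- sapply(1:4, function(r) mean(runs$lengths[runs$values == r]))
+    # pers.rle[1] matches pers1 above, etc.; like the loop above, runs crossing the
+    # seams between concatenated years/members are counted as single sequences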
+
+    # compute correlations between simulated clusters and observed regime anomalies:
+    cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+    lat.forecast <- lat; lon.forecast <- lon
+
+    if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+    load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+    load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig,period)) period <- period.orig
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+    if(p.orig != p) p <- p.orig
+
+    # compute the observed transition probabilities:
+    n.days.month <- n.days.in.a.period(load.month,2001)
+    j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+    for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){
+     if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+      transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1]
+      j1o <- j1o + 1
+     }
+     if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+      transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1]
+      j2o <- j2o + 1
+     }
+     if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+      transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1]
+      j3o <- j3o + 1
+     }
+     if (my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+      transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1]
+      j4o <- j4o + 1
+     }
+
+    }
+
+    trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+    trans.obs[1,2] <- length(which(transition.obs1 == 2))
+    trans.obs[1,3] <- length(which(transition.obs1 == 3))
+    trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+    trans.obs[2,1] <- length(which(transition.obs2 == 1))
+    trans.obs[2,3] <- length(which(transition.obs2 == 3))
+    trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+    trans.obs[3,1] <- length(which(transition.obs3 == 1))
+    trans.obs[3,2] <- length(which(transition.obs3 == 2))
+    trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+    trans.obs[4,1] <- length(which(transition.obs4 == 1))
+    trans.obs[4,2] <- length(which(transition.obs4 == 2))
+    trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+    trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+    for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+    # compute the observed interannual monthly variability:
+    freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+    for(y in year.start:year.end){
+     # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc,
+     freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+     freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+     freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+     freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+    }
+
+    sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+    sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+    sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+    sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+    wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+    regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+    if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+    if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+    if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+    if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+    cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+    cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+    cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+    cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+    cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+    cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+    cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+    cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+    cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+    cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+    cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+    cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+    cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+    cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+    cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+    cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+    cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+    # associate each simulated cluster to one observed regime:
+    max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+    cluster.corr2 <- cluster.corr
+    cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+    cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+    max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+    cluster.corr3 <- cluster.corr2
+    cluster.corr3[max2[1],] <- -999
+    cluster.corr3[,max2[2]] <- -999
+    max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+    cluster.corr4 <- cluster.corr3
+    cluster.corr4[max3[1],] <- -999
+    cluster.corr4[,max3[2]] <- -999
+    max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+    seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+    seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+    cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+    cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+    cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+    cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+    cluster1.name <- regimes.obs[cluster1.regime]
+    cluster2.name <- regimes.obs[cluster2.regime]
+    cluster3.name <- regimes.obs[cluster3.regime]
+    cluster4.name <- regimes.obs[cluster4.regime]
+
+    regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+    cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs))
+
+    spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
+    spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+    spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+    spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+    # measure persistence for the reanalysis dataset:
+    my.cluster.vector <- my.cluster[[load.month]][[1]]
+
+    sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+    i1 <- i2 <- i3 <- i4 <- 1
+
+    if(my.cluster.vector[1] != 1) i1 <- 0
+    if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[1] != 2) i2 <- 0
+    if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[1] != 3) i3 <- 0
+    if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[1] != 4) i4 <- 0
+    if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    n.dd <- length(my.cluster.vector)
+    for(d in 1:(n.dd-1)){
+     if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+     if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+     if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+     if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+     if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+     if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+     if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+     if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+    }
+
+    if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    persObs1 <- mean(sequ1[which(sequ1 != 0)])
+    persObs2 <- mean(sequ2[which(sequ2 != 0)])
+    persObs3 <- mean(sequ3[which(sequ3 != 0)])
+    persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+    # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+    # forecast month: September
+    if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+    if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+    if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"}
+    if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+    # forecast month: October
+    if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+    if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+    if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+    if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+    if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+    # forecast month: November
+    if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+    if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+    if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+    if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+    if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+    regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction
+    #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference
+
+    order.sim <- match(regimes.sim2, orden)
+    order.obs <- match(regimes.obs, orden)
+
+    clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+    clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+    # recompute the spatial correlations, because they might have changed due to the manual corrections above:
+    spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+    spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+    spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+    spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
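+    # illustrative sketch of the automatic association above as a reusable helper
+    # ('greedy.match' is a new name, defined here only for clarity and not called by
+    # this script): at each step the largest remaining correlation fixes one simulated
+    # cluster / observed regime pair, whose row and column are then masked out,
+    # exactly like the max1..max4 / cluster.corr2..cluster.corr4 steps above:
+    greedy.match <- function(corr.matrix){
+     assignment <- rep(NA, nrow(corr.matrix)) # assignment[i] = observed regime index for simulated cluster i
+     work <- corr.matrix
+     for(step in 1:nrow(corr.matrix)){
+      best <- which(work == max(work), arr.ind=TRUE)[1,]
+      assignment[best[1]] <- best[2]
+      work[best[1],] <- -999 # mask the matched row and column
+      work[,best[2]] <- -999
+     }
+     assignment
+    }
+    # regimes.obs[greedy.match(cluster.corr)] would reproduce the automatic
+    # cluster1.name..cluster4.name assignment above (before the manual corrections),
+    # assuming no exact ties in cluster.corr
+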
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig, period)) period <- period.orig
+    if(p.orig != p) p <- p.orig
+    if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+   } # close if on 'forecast.name'
+
+   if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+    if(save.names){
+     save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+    } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+     load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+     if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+     if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    }
+   }
+
+   # assign to each cluster its regime:
+   if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+   } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+   }
+
+   assign(paste0("map",cluster1), pslwr1mean)
+   assign(paste0("map",cluster2), pslwr2mean)
+   assign(paste0("map",cluster3), pslwr3mean)
+   assign(paste0("map",cluster4), pslwr4mean)
+
+   assign(paste0("fre",cluster1), wr1y)
+   assign(paste0("fre",cluster2), wr2y)
+   assign(paste0("fre",cluster3), wr3y)
+   assign(paste0("fre",cluster4), wr4y)
+
+   if(composition == "impact"){
+    assign(paste0("imp",cluster1), varwr1mean)
+    assign(paste0("imp",cluster2), varwr2mean)
+    assign(paste0("imp",cluster3), varwr3mean)
+    assign(paste0("imp",cluster4), varwr4mean)
+
+    if(fields.name == rean.name){
+     assign(paste0("sig",cluster1), pvalue1)
+     assign(paste0("sig",cluster2), pvalue2)
+     assign(paste0("sig",cluster3), pvalue3)
+     assign(paste0("sig",cluster4), pvalue4)
+    }
+   }
+
+   if(fields.name == forecast.name && n.map < 7){
+    assign(paste0("freMax",cluster1), wr1yMax)
+    assign(paste0("freMax",cluster2), wr2yMax)
+    assign(paste0("freMax",cluster3), wr3yMax)
+    assign(paste0("freMax",cluster4), wr4yMax)
+
+    assign(paste0("freMin",cluster1), wr1yMin)
+    assign(paste0("freMin",cluster2), wr2yMin)
+    assign(paste0("freMin",cluster3), wr3yMin)
+    assign(paste0("freMin",cluster4), wr4yMin)
+
+    #assign(paste0("spatial.cor",cluster1), spat.cor1)
+    #assign(paste0("spatial.cor",cluster2), spat.cor2)
+    #assign(paste0("spatial.cor",cluster3), spat.cor3)
+    #assign(paste0("spatial.cor",cluster4), spat.cor4)
+
+    assign(paste0("persist",cluster1), pers1)
+    assign(paste0("persist",cluster2), pers2)
+    assign(paste0("persist",cluster3), pers3)
+    assign(paste0("persist",cluster4), pers4)
+
+    clusters.all <- c(cluster1, cluster2, cluster3, cluster4)
+    ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4))
+
+    assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence]))
+    assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence]))
+    assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence]))
+    assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence]))
+
+    cluster1.obs <- which(orden == regimes.obs[1])
+    cluster2.obs <- which(orden == regimes.obs[2])
+    cluster3.obs <- which(orden == regimes.obs[3])
+    cluster4.obs <- which(orden == regimes.obs[4])
+
+    clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs)
+    ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4))
+
+    assign(paste0("freObs",cluster1.obs), wr1y.obs)
+    assign(paste0("freObs",cluster2.obs), wr2y.obs)
+    assign(paste0("freObs",cluster3.obs), wr3y.obs)
+    assign(paste0("freObs",cluster4.obs), wr4y.obs)
+
+    assign(paste0("persistObs",cluster1.obs), persObs1)
+    assign(paste0("persistObs",cluster2.obs), persObs2)
+    assign(paste0("persistObs",cluster3.obs), persObs3)
+    assign(paste0("persistObs",cluster4.obs), persObs4)
+
+    assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs]))
+    assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs]))
+    assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs]))
+    assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs]))
+
+   }
+
+   # position of the longitude values of Europe only (without the Atlantic Ocean and America):
+   #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+   EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
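+
+   # toy illustration of the wrap-around selection above, assuming a 1-degree grid
+   # and lon.max = 45 ('lon.toy' and 'EU.toy' are new names, not used by this script):
+   lon.toy <- 0:359
+   EU.toy <- c(1:which(lon.toy >= 45)[1], which(lon.toy >= 337)[1]:length(lon.toy))
+   # lon.toy[EU.toy] covers [0,45] plus [337,359], i.e. a longitude window that
+   # crosses the Greenwich meridian without including the central Atlantic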
+
+   # breaks and colors of the geopotential fields:
+   if(psl == "g500"){
+    #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+    my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+    my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+    #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+    values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+   }
+
+   if(psl == "psl"){
+    my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+    # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+
+    my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+    values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+   }
+
+   my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+   my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+   # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies):
+   #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+   #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+   #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR
+   if(var.name[var.num] == "sfcWind") {
+    my.brks.var <- seq(-3,3,0.5)
+    values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+   }
+   if(var.name[var.num] == "tas") {
+    my.brks.var <- seq(-5,5,1)
+    values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+   }
+
+   #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors
+   my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+   #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+   if(fields.name == forecast.name){
+    pos.years.present <- match(year.start:year.end, years)
+    years.missing <- which(is.na(pos.years.present))
+    fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c()
+    freq.max=100
+
+    k <- 0
+    for(y in year.start:year.end) {
+     if(y %in% (years.missing + year.start - 1)) {
+      fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA)
+      freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA)
+      freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA)
+      k=k+1
+     } else {
+      fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k])
+      fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k])
+      freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k])
+      freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k])
+      freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k])
+      freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k])
+     }
+    }
+   } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 }
+
+   sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
+   sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2)
+   sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+   sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+
+   # Visualize the composition with the 3 graphs for all 4 regimes: their pressure fields, their impact on var and their frequency series:
+   if(composition == "simple"){
+    if(!as.pdf && fields.name == rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700)
+
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""}
+    mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2)
+
+    # Centroid maps:
+    map.xpos <- 0
+    map.width <- 0.46
+    par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+    # Subtitle centroid maps:
+    map.title.xpos <- 0.76
+    map.title.width <- 0.2
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+    mtext("year", cex=3)
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+    mtext("year", cex=3)
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+    mtext("year", cex=3)
+    par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+    mtext("year", cex=3)
+
+    # Title Centroid Maps:
+    title1.xpos <- 0.02
+    title1.width <- 0.4
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+    mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+    mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+    mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+    mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+    # Impact maps:
+    if(composition == "impact"){ # it will never enter here, because we are already inside an if with composition == "simple"
+     impact.xpos <- 0.47
+     impact.width <- 0.26
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+     PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+     PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+     PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+     PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+     # Title impact maps:
+     title2.xpos <- 0.42
+     title2.width <- 0.35
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE)
+     mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE)
+     mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE)
+     mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE)
+     mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+    }
+
+    # Frequency plots:
+    barplot.xpos <- 0.75
+    barplot.width <- 0.25
+
+    par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE)
+    if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+    if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+    par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE)
+    if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+    if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+    par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE)
+    if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+    if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+    par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE)
+    if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+    if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+    # Title Frequency maps:
+    title3.xpos <- 0.75
+    title3.width <- 0.25
+    par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE)
+    mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression!
+    par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE)
+    mtext(paste0(regime2.name, " Frequency"), font=2, cex=4)
+    par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE)
+    mtext(paste0(regime3.name, " Frequency"), font=2, cex=4)
+    par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE)
+    mtext(paste0(regime4.name, " Frequency"), font=2, cex=4)
+
+    # % on Frequency plots:
+    symbol.xpos <- 0.735
+    symbol.width <- 0.01
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE)
+    mtext("%", cex=3.3)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE)
+    mtext("%", cex=3.3)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE)
+    mtext("%", cex=3.3)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE)
+    mtext("%", cex=3.3)
+
+    # mean frequency on Frequency plots:
+    symbol.xpos <- 0.9
+    symbol.width <- 0.1
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5)
+    par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5)
+
+    # add corr between obs.time series and ensemble mean time series on Frequency plots:
+    if(fields.name == forecast.name){
+     symbol.xpos <- 0.76
+     symbol.width <- 0.1
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE)
+     sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
+     mtext(paste0("r= ",sp.cor1), cex=4.5)
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE)
+     sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2)
+     mtext(paste0("r= ",sp.cor2), cex=4.5)
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE)
+     sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+     mtext(paste0("r= ",sp.cor3), cex=4.5)
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE)
+     sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+     mtext(paste0("r= ",sp.cor4), cex=4.5)
+    }
+
+    # Legend 1:
+    legend1.xpos <- 0.10
+    legend1.width <- 0.80
+    legend1.cex <- 2
+    my.subset <- match(values.to.plot, my.brks)
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+    par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+    ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+    if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+    # Legend 2:
+    if(fields.name == rean.name){
+     legend2.xpos <- 0.48
+     legend2.width <- 0.25
+     legend2.cex <- 1.8
+     my.subset2 <- match(values.to.plot2, my.brks.var)
+     par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+     ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+     mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+     par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+     ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+     mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+     par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+     ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+     mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+     par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+     ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+     mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    }
+
+    if(!as.pdf) dev.off() # for saving 4 .png files
+
+   } # close if on: composition == 'simple'
+
+
+   # Visualize the composition with the regime anomalies for a selected forecast month:
+   if(composition == "psl"){
+    # Centroid maps:
+    n.map <- n.map + 1 # n.map starts from 0
+    map.width <- 0.115
+    map.xpos <- 0.06 + map.width * (n.map - 1)
+    map.ypos <- 0.08
+    map.height <- 0.19
+    map.ysep <- 0.015
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+    # Sheet subtitles:
+    par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+    if(n.map < 8) {
+     mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+    } else{
+     mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+    }
+
+   } # close if on composition == 'psl'
+
+   # Visualize the composition with the impact maps for a selected forecast month:
+   if(composition == "impact"){
+    # Impact maps:
+    n.map <- n.map + 1 # n.map starts from 0
+    map.width <- 0.115
+    map.xpos <- 0.06 + map.width * (n.map - 1)
+    map.ypos <- 0.08
+    map.height <- 0.19
+    map.ysep <- 0.015
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+    PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+
+    # Sheet subtitles:
+    par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+    if(n.map < 8) {
+     mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+    } else{
+     mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+    }
+
+   } # close if on composition == 'impact'
+
+
+   # Visualize the composition with the interannual frequencies for a selected forecast month:
+   if(composition == "fre"){
+    # Frequency plots:
+    n.map <- n.map + 1 # n.map starts from 0
+    map.width <- 0.115
+    map.xpos <- 0.06 + map.width * (n.map - 1)
+    map.ypos <- 0.08
+    map.height <- 0.19
+    map.ysep <- 0.015
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+    if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+    if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+    if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+    if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+    if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+    if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+    if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+    if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+    # Sheet subtitles:
+    par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+    if(n.map < 8) {
+     mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+    } else{
+     mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+    }
+
+    # % on Frequency plots:
+    par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+    mtext("%", cex=3.3)
+    par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+    mtext("%", cex=3.3)
+    par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE)
+    mtext("%", cex=3.3)
+    par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE)
+    mtext("%", cex=3.3)
+
+    # mean frequency on Frequency plots:
+    par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5)
+    par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5)
+    par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5)
+    par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE)
+    mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5)
+
+    # add corr between obs.time series and ensemble mean time series on Frequency plots:
+    if(n.map < 8){
+     par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE)
+     mtext(paste0("r= ",sp.cor1), cex=3.5)
+     par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE)
+     mtext(paste0("r= ",sp.cor2), cex=3.5)
+     par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE)
+     mtext(paste0("r= ",sp.cor3), cex=3.5)
+     par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE)
+     mtext(paste0("r= ",sp.cor4), cex=3.5)
+    }
+   } # close if on composition == 'fre'
+
+   # just to plot the ERA-Interim running cluster monthly maps:
+   map1 <- pslwr1mean
+   map2 <- pslwr2mean
+   map3 <- pslwr3mean
+   map4 <- pslwr4mean
+   png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=3000,height=3700)
+   PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+   dev.off()
+   png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=3000,height=3700)
+   PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+   dev.off()
+   png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=3000,height=3700)
+   PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+   dev.off()
+   png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=3000,height=3700)
+   PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+   dev.off()
+
+
+   # save indicators for each startdate and forecast time:
+   # (fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector)
+   if(fields.name == forecast.name) {
+    if (composition != "impact") {
+     save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+    } else {
+     save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+    }
+   }
+
+   print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4))
+  } # close 'p' on 'period'
+
+  if(as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+ } # close 'lead.month' on 'lead.months'
+
+ if(composition == "psl" || composition == "fre"){
+  # Regime names:
+  title1.xpos <- 0.01
+  title1.width <- 0.04
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+  mtext(paste0(regime1.name), font=2, cex=5)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+  mtext(paste0(regime2.name), font=2, cex=5)
par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + +} # close if on composition != "summary" + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.sp.cor <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + 
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ # ratio (not difference) between the simulated and observed st.dev. of the frequencies:
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
#array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + # Spatial Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, 
cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", 
line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. 
summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+ # Delta freq. summary:
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j 
in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
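+ # (In these "1table" figures the y axis is the forecast month and the x axis the
+ # lead time, while the summary arrays are indexed by startdate, so each cell has
+ # to be looked up through its startdate month: for row i and column j (lead time
+ # 6-j), i2 <- ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) wraps the subtraction
+ # around the year end, i.e. i2 = ((i - (6-j) - 1) %% 12) + 1; e.g. forecast
+ # month Jan (i=1) seen at lead time 2 comes from the November startdate (i2=11).)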
par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + 
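+ # The chains of "colors[values < brk] <- col" assignments below (used for every
+ # summary table in this section) bin each cell into the colour of the interval
+ # it falls in. A vectorized equivalent based on findInterval() is sketched here
+ # for reference only ('bin.colors' is a hypothetical helper, not used by this
+ # script; it assumes brks is sorted and NA-free, so it does not apply to the
+ # my.seq vectors with NA endpoints):
+ bin.colors <- function(x, brks, cols) {
+   # findInterval() gives the index of the interval each value falls in;
+   # all.inside=TRUE clamps values outside the range into the first/last
+   # interval, matching the default colour of the cascades below
+   out <- array(cols[findInterval(x, brks, all.inside=TRUE)], dim(x))
+   out[is.na(x)] <- cols[length(cols)]   # NA cells keep the default (last) colour, as in the cascades
+   out
+ }
+ # e.g.: array.diff.sd.freq.colors <- bin.colors(array.diff.sd.freq, my.seq, diff.sd.freq.cols)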
array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- 
trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # NOTE: this 3-panel table is written to the same file name as the 4-panel table just above,
+ # so it overwrites it; rename one of the two files if both outputs are wanted.
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+
+
+
+
+
+ # Transition probability from NAO- to X summary:
+ png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = 
"Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
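+ # The eight chained "< my.seq[k]" assignments used in these summary blocks to turn a numeric
+ # array into colour classes can be collapsed with base R's cut(), which bins all values in one
+ # call. The helper below is only a sketch (bin.to.colors is a hypothetical name, not used
+ # elsewhere in this script): note that cut() closes bins on the right and sends values outside
+ # range(breaks) to NA (painted na.col), so exact boundary values can land one class away from
+ # where the chained version puts them.
+ bin.to.colors <- function(x, breaks, cols, na.col="white"){
+    idx <- cut(as.vector(x), breaks=breaks, labels=FALSE, include.lowest=TRUE)  # bin index 1..length(cols)
+    array(ifelse(is.na(idx), na.col, cols[idx]), dim=dim(x))  # same shape as x, colours instead of values
+ }
+ # e.g.: array.trans.diff1.colors <- bin.to.colors(array.trans.diff1, my.seq, trans.cols)
+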
png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 
0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(workdir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() +
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!!
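+ ## NOTE: with longitudes on a 0-360 degree grid, the European window below wraps around the Greenwich
+ ## meridian, so it is selected as the union of two index ranges. A minimal sketch with a hypothetical
+ ## 1-degree grid and the same lon.max = 45 used below (kept commented out, following the convention of
+ ## this script for inactive example code):
+ ## lon.toy <- 0:359
+ ## EU.toy <- c(1:which(lon.toy >= 45)[1], which(lon.toy >= 337)[1]:length(lon.toy))  # 0..45E plus 337E..359E (i.e. 23W..1W)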
+ + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max <- 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in the future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + ## # most influential WT with positive impact: + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + # most influential WT with negative impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + +
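+ ## NOTE: a vectorized alternative to the nested i/j loops above that pick, at each grid point, the regime
+ ## with the most negative impact (a sketch, commented out; it assumes the 'abind' package is installed and
+ ## that imp1..imp4 share the same lon x lat dimensions; ties are broken by the first minimum):
+ ## library(abind)
+ ## imp.stack <- abind(imp1, imp2, imp3, imp4, along=3)
+ ## imp.all <- apply(imp.stack, c(1,2), which.min) + 0.5  # yields 1.5/2.5/3.5/4.5, as in the loops above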
par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact" + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v17.R b/old/weather_regimes_maps_v17.R new file mode 100644 index 0000000000000000000000000000000000000000..732f31f6831764cc6e2c55ce15b3bddaf0093d5a --- /dev/null +++ b/old/weather_regimes_maps_v17.R @@ -0,0 +1,3603 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to visually find the order in which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case in the future a user needs to visualize these maps again. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
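+# In short, the two-pass workflow described above boils down to (illustrative settings only):
+# 1st run:    ordering <- FALSE; save.names <- TRUE  -> inspect the output maps to identify each cluster's regime
+# 2nd run:    ordering <- TRUE;  save.names <- TRUE  -> save the cluster/regime association to the ClusterNames .RData file
+# later runs: ordering <- TRUE;  save.names <- FALSE -> reuse the stored association without overwriting it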
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name #forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name ='rean.name' and composition ='simple', choose a variable for the impact maps (1: sfcWind, 2: tas) + +period <- 12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have previously created the '_psl.RData' file with the output of 'weather_regimes_vXX'.R for that period. + +# run it twice: once, with 'ordering <- FALSE', 'save.names <- TRUE' and 'as.pdf=T' to look at the cartography and visually find the right regime names of the four clusters; +# then, insert the regime names in the correct order below and run this script a second time one month at a time with 'ordering = TRUE' and 'save.names = TRUE', to save the +# ordered cartography. After that, any time you run this script, set 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the files which store the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "taylor" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.)
[You need all ClusterNames files] + # - 'impact' for all the impact plots of the regime with the highest impact + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + +# Associate the regimes to the four clusters: +cluster4.name <- "NAO+" +cluster2.name <- "NAO-" +cluster3.name <- "Blocking" +cluster1.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +# if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +#work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" +#work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
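+ ## NOTE: inside the loop below, the startdate month is recovered from the target month and the lead time
+ ## with modular arithmetic; a toy illustration with hypothetical values (kept commented out):
+ ## forecast.month <- 1; lead.month <- 3
+ ## period <- forecast.month - lead.month  # -2 ...
+ ## if(period < 1) period <- period + 12   # ... wraps to 10, i.e. an October startdate of the previous year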
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available, if not it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + print("Impact data not available; 'simple' compositions will have an empty space in the middle column") + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + if(composition == "impact") load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data # for impact maps + + #if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + #if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4
<- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){ + if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1] + j3o <- j3o + 1 + } + if
(my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1] + j4o <- j4o + 1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order than the reanalysis'")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
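+ ## NOTE: the 16 explicit assignments of this block (continued below) can equivalently be generated in one
+ ## expression, with rows the simulated clusters and columns the observed regimes (a sketch, commented out;
+ ## dimnames omitted):
+ ## sims <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+ ## obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+ ## cluster.corr <- sapply(obs, function(o) sapply(sims, function(s) cor(as.vector(s), as.vector(o))))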
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + 
if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, since they might have changed after the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + +
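+ ## NOTE: the run-length bookkeeping used above to measure the mean persistence of each regime can also be
+ ## written with rle(), which returns the same run lengths as the sequ1..sequ4 counters (a sketch, commented
+ ## out; persObs.alt is a hypothetical name for the resulting vector of the four mean persistences):
+ ## runs <- rle(my.cluster.vector)
+ ## persObs.alt <- sapply(1:4, function(k) mean(runs$lengths[runs$values == k]))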
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close if on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(composition == "impact"){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2,
cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <-
my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 
0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(composition == "impact"){ # it will never enter here because we are already inside an if with composition == "simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) +
PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, 
ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving the 4 PNGs + + } # close if on composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition with the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Impact maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), 
new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Frequency barplots: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, 
freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1, spat.cor2, spat.cor3, spat.cor4 + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition == 'psl' or 'fre' + + print("Finished!") +} # close if on composition != "summary" + +if(composition == "taylor"){ + for(p in 1:12){ + p.orig <- p + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + } + +} # close if on composition == "taylor"
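+
+ # Illustrative sketch (not part of the original workflow, and wrapped in if(FALSE)
+ # so it never executes): the summary figures built below split each (startdate,
+ # lead time) cell into four triangles, one per regime (left/bottom/right/top).
+ # 'toy.scores' is made-up data standing in for an array like array.sp.cor:
+ if(FALSE){
+   toy.scores <- array(runif(12*7*4, -1, 1), c(12,7,4)) # [startdate, leadmonth, regime]
+   toy.col <- function(x) ifelse(x < 0, "#d6604d", "#4393c3") # toy two-color palette
+   plot(1, 1, type="n", xlim=c(0.5,12.5), ylim=c(-0.5,6.5), xlab="Startdate", ylab="Lead time")
+   for(p in 1:12){
+     for(l in 0:6){
+       polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=toy.col(toy.scores[p,1+l,1])) # left
+       polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=toy.col(toy.scores[p,1+l,2])) # bottom
+       polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=toy.col(toy.scores[p,1+l,3])) # right
+       polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=toy.col(toy.scores[p,1+l,4])) # top
+     }
+   }
+ }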
+ + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # arrays storing the summary statistics in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
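+
+ # Note on the transition arrays filled below: transSimN / transObsN hold, for
+ # regime N, the fraction of days that transition into each of the four regimes
+ # (in the 'orden' order); the 100* factor converts them to percentages, and each
+ # self-transition is stored as NA so the diagonal stays blank in the summary plots.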
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + # Spatial Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.sp.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < 
my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), 
ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 
0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + + dev.off()
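+
+ # Reading the *_1table layouts that follow: rows are forecasted months and
+ # columns are lead times (labelled 6..0 from left to right), so each cell is
+ # traced back to its startdate with i2 = i - (6-j), wrapped across the year
+ # boundary. Worked example: forecasted month i = 1 (Jan) at lead 2 (j = 4)
+ # gives i2 = 1 - 2 = -1, hence -1 + 12 = 11, i.e. a November startdate.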
+ + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + + dev.off() + + + + + + + # Freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + 
mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), 
new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. 
summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + 
+array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("S4 Regime persistence (in days/month)", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.pers.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.pers.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("S4 Regime persistence (in days/month)", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("S4 Regime persistence (in days/month)", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
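+# Sketch (not used above): the threshold ladders that turn each score array into a
+# colour array can be written as one vectorized cut() call, assuming finite, strictly
+# increasing breaks and values inside their range (which does not hold for the
+# NA-padded my.seq of the next block); e.g. for the persistence colours:
+#   pers.bin <- cut(array.pers, breaks = my.seq, labels = FALSE, right = FALSE)
+#   array.pers.colors[] <- pers.cols[pers.bin]
+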
+# Delta persistence summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400)
+
+# create an array similar to array.pers but with colors instead of frequencies:
+diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA)
+
+array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4))
+array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7]
+array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6]
+array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5]
+array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4]
+array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3]
+array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2]
+array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.pers.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
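+# Sketch of a small helper (hypothetical, not called above) that draws the four
+# triangles of one (startdate p, lead l) cell; the triangle plots in this section all
+# repeat these same four polygon() calls with a different colour array:
+draw.cell.triangles <- function(p, l, cols){
+  polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=cols[1])  # left triangle
+  polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=cols[2]) # bottom triangle
+  polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=cols[3])    # right triangle
+  polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=cols[4])   # top triangle
+}
+# usage: for(p in 1:12) for(l in 0:6) draw.cell.triangles(p, l, array.diff.pers.colors[p,1+l,])
+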
+# St.Dev.freq ratio summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+# create an array similar to array.diff.sd.freq but with colors instead of ratio values:
+diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+
+array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
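+# Note on the "_1table" figures: row i is the forecast (target) month and column j
+# plots lead time 6-j, so the startdate month whose lead 6-j verifies in month i is
+# i2 = i - (6-j), wrapped into 1..12 (e.g. i=1, Jan, at lead 2 comes from i2=11, Nov).
+# An equivalent closed form for the ifelse() used in those loops:
+#   i2 <- ((i - (6-j) - 1) %% 12) + 1
+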
+# Transition probability from NAO+ to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim1 but with colors instead of transition percentages:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Transition from NAO+ regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO+ regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+# NOTE: this 4-panel figure is overwritten by the 3-panel block below, which writes to the same file:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO+ regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO+ regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
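+# Sketch of a reusable binning helper (hypothetical, not called above) equivalent to
+# the colour ladder repeated for each score array in this section; it paints missing
+# values white, as the transition blocks do:
+bin.colors <- function(x, brks, cols){
+  out <- array(cols[length(cols)], dim(x))                    # default: last colour
+  for(k in (length(brks)-1):2) out[x < brks[k]] <- cols[k-1]  # same thresholds as above
+  out[is.na(x)] <- "white"
+  return(out)
+}
+# usage: array.trans.sim2.colors <- bin.colors(array.trans.sim2, my.seq, trans.cols)
+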
+# Transition probability from NAO- to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim2 but with colors instead of transition percentages:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1]
+array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Transition from NAO- regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO- regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO- regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+# Transition probability from blocking to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim3 but with colors instead of transition percentages:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Transition from blocking regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from blocking regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from blocking regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+# Transition probability from Atlantic ridge to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim4 but with colors instead of transition percentages:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7]
+array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6]
+array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5]
+array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4]
+array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3]
+array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2]
+array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1]
+array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Transition from Atlantic Ridge regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from Atlantic Ridge regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from Atlantic Ridge regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}}
+
+par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+# Bias of the Transition probability from NAO+ to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400)
+
+# create an array similar to array.trans.diff1 but with colors instead of bias values:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7]
+array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6]
+array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5]
+array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4]
+array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3]
+array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2]
+array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1]
+array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Bias transition from NAO+ regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
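+# The per-regime blocks in this section differ only in the colour array and the regime
+# name pasted into titles and filenames; a sketch of a loop over them (hypothetical
+# list, assuming the remaining bias arrays follow the diff1/diff2 naming) could
+# generate the figures in one pass:
+#   bias.colors <- list("NAO+" = array.trans.diff1.colors, "NAO-" = array.trans.diff2.colors)
+#   for(reg in names(bias.colors)){
+#     png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_",reg,".png"), width=550, height=400)
+#     # ... same plotting calls as above, indexing bias.colors[[reg]] ...
+#     dev.off()
+#   }
+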
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Bias transition from NAO+ regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Bias transition from NAO+ regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+# Bias of the Transition probability from NAO- to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400)
+
+# create an array similar to array.trans.diff2 but with colors instead of bias values:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7]
+array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6]
+array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5]
+array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4]
+array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3]
+array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2]
+array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1]
+array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Bias transition from NAO- regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Bias transition from NAO- regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) 
> 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + 
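+  # In the '1table' panels below, the y-axis is the forecasted month i and the x-axis the lead time;
+  # i2 converts a (forecasted month, lead time) pair back to the start month: with the axis drawn so
+  # that column j corresponds to lead 6-j, the start month is i - (6-j), plus 12 whenever the
+  # subtraction leaves the 1..12 range (calendar wrap-around).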
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
+ + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + ## # most influential WR with positive impact: + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + # most influential WR with negative impact (a vectorized sketch is given in the comments after this block): + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + 
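+  # A vectorized sketch of the 'most negative impact' selection used above, kept as comments so it
+  # is not executed ('imps' is a hypothetical 3D array stacking the four impact maps; at exact ties
+  # which.min() keeps the first regime, while the loop above keeps the last):
+  # imps <- array(c(imp1, imp2, imp3, imp4), dim = c(dim(imp1), 4))
+  # imp.all <- apply(imps, c(1,2), which.min) + 0.5   # same 1.5/2.5/3.5/4.5 coding expected by brks=1:5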
par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact" + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v17.R~ b/old/weather_regimes_maps_v17.R~ new file mode 100644 index 0000000000000000000000000000000000000000..373fa7d9a7056b23cdea075fe36f0707154c3885 --- /dev/null +++ b/old/weather_regimes_maps_v17.R~ @@ -0,0 +1,3587 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: first, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png +# file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, the user might find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Then, change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you only have to run this script to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, using it to automatically associate to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! 
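+#
+# A hypothetical example of the two runs described above (the regime order shown is made up;
+# use whatever order you see in the file produced by the first run):
+#   run 1:  ordering <- FALSE; save.names <- TRUE   # inspect the output maps to identify each cluster
+#   then set, e.g.: cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"
+#   run 2:  ordering <- TRUE;  save.names <- TRUE   # saves the ordered maps and the ClusterNames .RData file
+#   later:  ordering <- TRUE;  save.names <- FALSE  # visualize again without overwriting the stored order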
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name #forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps (1: sfcWind, 2: tas) + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have previously created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- T # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "simple" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) 
[You need all ClusterNames files] + # - 'impact' for all the impact plots of the regime with the highest impact + # - 'none' doesn't plot anything, it only saves the ClusterNames.RData files + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +# if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +#work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" +#work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify the lead times (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact"){ + + if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! 
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if available; if not, it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + print("Impact data not available; 'simple' compositions will have an empty space in the middle column") + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + if(composition == "impact") load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data (for impact maps) + + #if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + #if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 
<- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){ + if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1] + j3o <- j3o + 1 + } + if 
(my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + 
if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ persObs1 <- mean(sequ1[which(sequ1 != 0)])
+ persObs2 <- mean(sequ2[which(sequ2 != 0)])
+ persObs3 <- mean(sequ3[which(sequ3 != 0)])
+ persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+ # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+ # forecast month: September
+ if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+ if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"}
+ if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+ # forecast month: October
+ if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+ if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+ if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+ if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+ if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+ # forecast month: November
+ if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+ if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+ if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+ regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # simulated regimes after the manual corrections
+ #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference
+
+ order.sim <- match(regimes.sim2, orden)
+ order.obs <- match(regimes.obs, orden)
+
+ clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+ clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+ # recompute the spatial correlations, since they may have changed after the manual corrections above:
+ spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+ spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+ spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+ spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
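+ # Illustration only (guarded by if(FALSE), so it is never executed): compact base-R
+ # equivalents of the transition, persistence and cluster-matching loops above;
+ # `clus` and `cor.mat` are toy stand-ins for my.cluster[[load.month]]$cluster and
+ # cluster.corr, so the numbers below are not the ones used by this script.
+ if(FALSE){
+    clus <- c(1,1,2,3,3,3,4,1,2,2)  # toy daily sequence of cluster numbers
+
+    # transition probabilities between different regimes (as in trans.obs):
+    trans <- table(factor(head(clus,-1), levels=1:4), factor(tail(clus,-1), levels=1:4))
+    diag(trans) <- NA  # persistence (self-transitions) is excluded, as above
+    trans <- trans / rowSums(trans, na.rm=TRUE)
+
+    # mean persistence of each regime (as in persObs1-persObs4), via run lengths:
+    runs <- rle(clus)
+    pers <- tapply(runs$lengths, factor(runs$values, levels=1:4), mean)
+
+    # greedy matching of simulated clusters to observed regimes by maximum pattern
+    # correlation (the max1-max4 procedure above, written as a loop):
+    cor.mat <- matrix(runif(16,-1,1), 4, 4)  # toy 4x4 correlation matrix
+    assignment <- integer(4)
+    for(k in 1:4){
+       mx <- which(cor.mat == max(cor.mat), arr.ind=TRUE)[1,]
+       assignment[mx[1]] <- mx[2]  # simulated cluster mx[1] -> observed regime mx[2]
+       cor.mat[mx[1],] <- -Inf; cor.mat[,mx[2]] <- -Inf  # mask its row and column
+    }
+ }
+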
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+ #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig, period)) period <- period.orig
+ if(p.orig != p) p <- p.orig
+ if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+ } # close for on 'forecast.name'
+
+ if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+ if(save.names){
+ save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+ } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ }
+ }
+
+ # assign to each cluster its regime:
+ if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ if(composition == "impact"){
+ assign(paste0("imp",cluster1), varwr1mean)
+ assign(paste0("imp",cluster2), varwr2mean)
+ assign(paste0("imp",cluster3), varwr3mean)
+ assign(paste0("imp",cluster4), varwr4mean)
+
+ if(fields.name == rean.name){
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+ }
+ }
+
+ if(fields.name == forecast.name && n.map < 7){
+ assign(paste0("freMax",cluster1), wr1yMax)
+ assign(paste0("freMax",cluster2), wr2yMax)
+ assign(paste0("freMax",cluster3), wr3yMax)
+ assign(paste0("freMax",cluster4), wr4yMax)
+
+ assign(paste0("freMin",cluster1), wr1yMin)
+ assign(paste0("freMin",cluster2), wr2yMin)
+ assign(paste0("freMin",cluster3), wr3yMin)
+ assign(paste0("freMin",cluster4), wr4yMin)
+
+ #assign(paste0("spatial.cor",cluster1), spat.cor1)
+ #assign(paste0("spatial.cor",cluster2), spat.cor2)
+ #assign(paste0("spatial.cor",cluster3), spat.cor3)
+ #assign(paste0("spatial.cor",cluster4), spat.cor4)
+
+ assign(paste0("persist",cluster1), pers1)
+ assign(paste0("persist",cluster2), pers2)
+ assign(paste0("persist",cluster3), pers3)
+ assign(paste0("persist",cluster4), pers4)
+
+ clusters.all <- c(cluster1, cluster2, cluster3, cluster4)
+ ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4))
+
+ assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence]))
+ assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence]))
+ assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence]))
+ assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence]))
+
+ cluster1.obs <- which(orden == regimes.obs[1])
+ cluster2.obs <- which(orden == regimes.obs[2])
+ cluster3.obs <- which(orden == regimes.obs[3])
+ cluster4.obs <- which(orden == regimes.obs[4])
+
+ clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs)
+ ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4))
+
+ assign(paste0("freObs",cluster1.obs), wr1y.obs)
+ assign(paste0("freObs",cluster2.obs), wr2y.obs)
+ assign(paste0("freObs",cluster3.obs), wr3y.obs)
+ assign(paste0("freObs",cluster4.obs), wr4y.obs)
+
+ assign(paste0("persistObs",cluster1.obs), persObs1)
+ assign(paste0("persistObs",cluster2.obs), persObs2)
+ assign(paste0("persistObs",cluster3.obs), persObs3)
+ assign(paste0("persistObs",cluster4.obs), persObs4)
+
+ assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs]))
+
+ }
+
+ # positions of the longitude values covering Europe only (without the Atlantic Ocean and the Americas):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # Mean anomaly of a WR
+ my.brks <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # Mean anomaly of a WR
+
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+ #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # Mean anomaly of a WR
+ #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # Mean anomaly of a WR
+ #my.brks.var <- c(-20,seq(-3,3,0.5),20) # Mean anomaly of a WR
+ if(var.name[var.num] == "sfcWind") {
+ my.brks.var <- seq(-3,3,0.5)
+ values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+ }
+ if(var.name[var.num] == "tas") {
+ my.brks.var <- seq(-5,5,1)
+ values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+ }
+
+ #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors
+ my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+ #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+ freq.max <- 100
+ if(fields.name == forecast.name){
+ # insert NA in the frequency series for the years missing from the forecast, shifting the stored values accordingly:
+ pos.years.present <- match(year.start:year.end, years)
+ years.missing <- which(is.na(pos.years.present))
+ fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c()
+ k <- 0
+ for(y in year.start:year.end) {
+ if(y %in% (years.missing + year.start - 1)) {
+ fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA)
+ freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA)
+ freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA)
+ k <- k+1
+ } else {
+ fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k])
+ fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k])
+ freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k])
+ freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k])
+ freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k])
+ freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k])
+ }
+ }
+
+ sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
+ sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2)
+ sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+ sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+
+ } else {
+ fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4
+ }
+
+
+ # Visualize the composition with the 3 graphs for all the 4 regimes: their pressure fields, their impact on var and their frequency series:
+ if(composition == "simple"){
+ if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700)
+ if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""}
+ mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2)
+
+ # Centroid maps:
+ map.xpos <- 0
+ map.width <- 0.46
+ par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.30
+ legend1.cex <- 2
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Subtitle centroid maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.02
+ title1.width <- 0.4
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+ mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+ mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+ mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+ mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+ # Impact maps:
+ if(composition == "impact"){ # never reached: we are already inside an if with composition == "simple"
+ impact.xpos <- 0.47
+ impact.width <- 0.26
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+
PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, 
ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ }
+
+ if(!as.pdf) dev.off() # for saving the 4 png files
+
+ } # close if on: composition == 'simple'
+
+
+ # Visualize the composition with the regime anomalies for a selected forecast month:
+ if(composition == "psl"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'psl'
+
+ # Visualize the composition with the impact maps for a selected forecast month:
+ if(composition == "impact"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'impact'
+
+
+ # Visualize the composition with the interannual frequencies for a selected forecast month:
+ if(composition == "fre"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2,
freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { + save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + +} # close if on composition != "summary" + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.sp.cor <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster 
names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+ array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+ array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+ array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+ array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+ array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+ array.pers[p,1+l, 1] <- persist1 # NAO+
+ array.pers[p,1+l, 3] <- persist2 # NAO-
+ array.pers[p,1+l, 4] <- persist3 # Blocking
+ array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+ array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+ array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+ array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to Blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to Blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from Blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from Blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from Blocking to Blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from Blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.Ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.Ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.Ridge to Blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to Blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to Blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from Blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from Blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from Blocking to Blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from Blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.Ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.Ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.Ridge to Blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])
+ polygon(c(1+p-1,
1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + 
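+
+ # Illustration only (guarded by if(FALSE)): the colour binning done above with chained
+ # `<` assignments, and the four triangles drawn for each cell of the summary plots,
+ # in compact form; `vals` is a toy stand-in for one cell of array.sp.cor.
+ if(FALSE){
+    brks <- seq(-1, 1, 0.25)
+    cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+    vals <- runif(4, -1, 1)  # toy values of the 4 regimes in one cell
+    cell.cols <- cols[cut(vals, breaks=brks, include.lowest=TRUE)]  # one colour per regime
+
+    # draw the cell at startdate p and lead time l, split into its 4 triangles,
+    # with the same geometry as the polygon() calls above:
+    p <- 1; l <- 0
+    plot(1, 1, type="n", xlim=c(0.5, 1.5), ylim=c(-0.5, 0.5))
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=cell.cols[1])  # left
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=cell.cols[2]) # bottom
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(0+l, 0.5+l, -0.5+l), col=cell.cols[3])  # right
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=cell.cols[4])   # top
+ }
+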
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- seq(-1,1,0.25) # breaks matching the binning below; also used by the color bars of the corr tables
+
+ array.cor.colors <- array(corr.cols[8],c(12,7,4))
+ array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+
array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + 
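+
+ # Illustration only (guarded by if(FALSE)): each panel of these 4-table figures repeats
+ # the same par/plot/axis/polygon sequence; a small helper like this hypothetical
+ # draw.panel() could draw one panel given its colour slice, title and fig coordinates:
+ if(FALSE){
+    draw.panel <- function(col.slice, panel.title, fig.title, fig.plot){
+       par(mar=c(0,0,4,0), fig=fig.title, new=TRUE)
+       mtext(panel.title, cex=1.5)
+       par(mar=c(0,4,1,0), fig=fig.plot, new=TRUE)
+       plot(1, 1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=FALSE)
+       mtext(side=1, text="Startdate", line=4, cex=1.5)
+       mtext(side=2, text="Lead time", line=2.5, cex=1.5)
+       axis(1, at=1:12, las=2, labels=my.month.short, cex.axis=1.5)
+       axis(2, at=0:6, las=2, labels=0:6, cex.axis=1.5)
+       for(p in 1:12) for(l in 0:6) polygon(c(-0.5+p, -0.5+p, 0.5+p, 0.5+p), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=col.slice[p,1+l])
+    }
+    # e.g.: draw.panel(array.cor.colors[,,1], "NAO+", c(0, 0.45, 0.85, 0.98), c(0, 0.45, 0.6, 0.9))
+ }
+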
par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1,
labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + 
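+
+# Editorial sketch: every startdate x lead-time cell of the single-table summary
+# plots is split into four triangles, one per regime (slices 1-4 of the colour
+# arrays appear to map to NAO+, Atlantic Ridge, NAO- and Blocking, judging from the
+# panel titles of the 4-panel figures). A small helper keeps the four polygon()
+# calls in one place; 'draw.triangle.cell' is an illustrative name, not part of the
+# original script.
+draw.triangle.cell <- function(p, l, cols4) {
+  polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1])  # left triangle
+  polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2])  # bottom triangle
+  polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3])  # right triangle
+  polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4])  # top triangle
+}
+# e.g.: for(p in 1:12) for(l in 0:6) draw.triangle.cell(p, l, array.freq.colors[p, 1+l, ])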
mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + 
plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), 
+
+
+# Delta freq. summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+# create an array similar to array.diff.freq but with colors instead of frequency differences:
+diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+# Persistence summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+# create an array similar to array.pers but with colors instead of persistence values:
+pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+array.pers.colors <- array(pers.cols[8],c(12,7,4))
+array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("S4 Regime persistence (in days/month)", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.pers.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.pers.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("S4 Regime persistence (in days/month)", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
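+
+# Editorial sketch: within each '4tables' figure the four panels differ only in the
+# regime title, the fig= rectangles and the colour-array slice, so they could be
+# stamped from a single loop instead of copy-pasted blocks (which makes accidental
+# duplication easy). 'panel.specs' and 'draw.4tables.panels' are illustrative names;
+# the coordinates below mirror the par(fig=...) calls used above.
+panel.specs <- list(list(name="NAO+",           slice=1, head=c(0, 0.45, 0.85, 0.98),   body=c(0, 0.45, 0.6, 0.9)),
+                    list(name="Blocking",       slice=4, head=c(0, 0.45, 0.42, 0.5),    body=c(0, 0.45, 0.12, 0.42)),
+                    list(name="NAO-",           slice=3, head=c(0.45, 0.9, 0.85, 0.98), body=c(0.45, 0.9, 0.6, 0.9)),
+                    list(name="Atlantic Ridge", slice=2, head=c(0.45, 0.9, 0.42, 0.5),  body=c(0.45, 0.9, 0.12, 0.42)))
+draw.4tables.panels <- function(col.array, specs=panel.specs) {
+  # assumes a png device is already open and plot.new() has been called
+  for(ps in specs) {
+    par(mar=c(0,0,4,0), fig=ps$head, new=TRUE); mtext(ps$name, cex=1.5)
+    par(mar=c(0,4,1,0), fig=ps$body, new=TRUE)
+    plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+    mtext(side=1, text="Startdate", line=4, cex=1.5)
+    mtext(side=2, text="Lead time", line=2.5, cex=1.5)
+    axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+    axis(2, at=seq(0,6), las=2, labels=0:6, cex.axis=1.5)
+    for(p in 1:12) for(l in 0:6) polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=col.array[p, 1+l, ps$slice])
+  }
+}
+# e.g.: draw.4tables.panels(array.pers.colors)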
mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + 
+
+
+# St.Dev. freq ratio summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+# create an array similar to array.diff.sd.freq but with colors instead of ratio values:
+diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+
+array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
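+
+# Editorial note: in the '1table' figures the y-axis is the month being verified (i)
+# and the x-axis position j corresponds to lead time 6-j (the axis labels run 6:0),
+# so the start date whose forecast verifies in month i at that lead time is i-(6-j),
+# wrapped back into 1..12; that is exactly what i2 computes. The same mapping in one
+# line, with two sanity checks ('start.month' is an illustrative name only):
+start.month <- function(verif.month, lead) ((verif.month - lead - 1) %% 12) + 1
+stopifnot(start.month(1, 6) == 7)   # January verified at 6 months lead -> July start date
+stopifnot(start.month(8, 0) == 8)   # lead 0 -> start date is the verified month itself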
+
+
+# Transition probability from NAO+ to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim1 but with colors instead of transition probabilities:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Transition from NAO+ regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO+ regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+# only three panels: the self-transition NAO+ -> NAO+ is not shown
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO+ regime", cex=1.2)
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
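+
+# Editorial note: the array.trans.sim* arrays plotted in this block are computed
+# earlier in the script. Purely as an illustration of the quantity shown (a toy
+# example, not the original code), transition percentages between consecutive
+# daily regimes of one member could be tabulated like this:
+# regimes   <- c(1,1,2,3,3,3,1,4,4,2)                     # daily cluster numbers
+# trans     <- table(head(regimes,-1), tail(regimes,-1))  # counts of day -> day+1 moves
+# trans.pct <- 100*prop.table(trans, margin=1)            # row-normalised, in %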
+
+
+# Transition probability from NAO- to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim2 but with colors instead of transition probabilities:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1]
+array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Transition from NAO- regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO- regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+# only three panels: the self-transition NAO- -> NAO- is not shown
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from NAO- regime", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+# Transition probability from blocking to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim3 but with colors instead of transition probabilities:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Transition from blocking regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Transition from blocking regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text
= "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) 
> 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300)
+ plot.new()
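+ # In the "_1table" figures below, the y axis is the forecasted calendar month
+ # i and the x position j corresponds to lead time 6-j; the index i2 recovers
+ # the start month, wrapping around the year. An equivalent modular form would
+ # be (a sketch, assuming the same 1-12 month convention; not used here):
+ # start.month <- function(i, lead) 1 + (i - lead - 1) %% 12
+ # # e.g. start.month(1, 6) == 7: a January forecast at lead 6 starts in July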
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4))
+ ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7]
+ ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6]
+ ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5]
+ ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4]
+ ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3]
+ ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2]
+ ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1]
+
+ ## par(mar=c(5,4,4,4.5))
+ ## plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5)
+ ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+ ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ ## axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+ ## for(p in 1:12){
+ ##   for(l in 0:6){
+ ##     polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+ ##     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2])
+ ##     polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3])
+ ##     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4])
+ ##   }
+ ## }
+
+ ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+ ## dev.off()
+
+} # close if on composition == "summary"
+
+
+
+
+# impact map of the strongest WR:
+if(composition == "impact" && fields.name == rean.name){
+
+ var.num <- 2 # choose a variable (1:sfcWind, 2:tas)
+
+ png(filename=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650)
+
+ plot.new()
+ #par(mfrow=c(4,3))
+ #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+ col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+   load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData"))
+   load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!!
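+   # The longitude axis of this grid runs 0-360, so the European window selected
+   # just below wraps around the Greenwich meridian: it keeps the grid points
+   # from 0 up to lon.max = 45E plus those from 337E (i.e. 23W) to the end.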
+
+   # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+   #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only
+   lon.max = 45
+   EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+   cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+   cluster2 <- which(orden == cluster2.name.period[p])
+   cluster3 <- which(orden == cluster3.name.period[p])
+   cluster4 <- which(orden == cluster4.name.period[p])
+
+   assign(paste0("imp",cluster1), varPeriodAnom1mean)
+   assign(paste0("imp",cluster2), varPeriodAnom2mean)
+   assign(paste0("imp",cluster3), varPeriodAnom3mean)
+   assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+   ## imp.all <- imp1
+   ## for(i in 1:dim(imp1)[1]){
+   ##   for(j in 1:dim(imp1)[2]){
+   ##     if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+   ##     if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+   ##     if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+   ##     if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+   ##   }
+   ## }
+
+   ## # most influential WT with positive impact:
+   ## imp.all <- imp1
+   ## for(i in 1:dim(imp1)[1]){
+   ##   for(j in 1:dim(imp1)[2]){
+   ##     if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+   ##     if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+   ##     if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+   ##     if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+   ##   }
+   ## }
+
+   # most influential WT with negative impact:
+   imp.all <- imp1
+   for(i in 1:dim(imp1)[1]){
+     for(j in 1:dim(imp1)[2]){
+       if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+       if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+       if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+       if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+     }
+   }
+
+   if(p<=3) yMod <- 0.7
+   if(p>=4 && p<=6) yMod <- 0.5
+   if(p>=7 && p<=9) yMod <- 0.3
+   if(p>=10) yMod <- 0.1
+   if(p==13 || p==14) yMod <- 0.52
+   if(p==15 || p==16) yMod <- 0.1
+
+   if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+   if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+   if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+   if(p==13 || p==15) xMod <- 0.05
+   if(p==14 || p==16) xMod <- 0.50
+
+   # impact map:
+   if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+   if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+   PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+   # title map:
+   if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+   if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod + 0.21 + 0.166, yMod + 0.21 + 0.176), new=TRUE)
+   mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
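+ # The nested i/j loops above (inside the loop on 'p') pick, at each grid
+ # point, the regime with the most negative impact; a vectorised sketch of the
+ # same idea (hypothetical, needs the 'abind' package, and breaks ties by the
+ # first minimum instead of the last):
+ # imp.stack <- abind::abind(imp1, imp2, imp3, imp4, along=3)
+ # imp.all <- apply(imp.stack, c(1,2), which.min) + 0.5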
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v18.R b/old/weather_regimes_maps_v18.R
new file mode 100644
index 0000000000000000000000000000000000000000..4dc14ac5f4248cf5f8113a25d868d200ef185cf7
--- /dev/null
+++ b/old/weather_regimes_maps_v18.R
@@ -0,0 +1,3876 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData"
+# Output files are the maps, which the user can save as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/Downloads/scripts/weather_regimes_maps_v18.R (for example)
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering'
+# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case in future a user needs to visualize these maps again. In this case, the user must set
+# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis is employed to
+# automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- rean.name #forecast.name + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 11 #1:12 # You need to have created the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period beforehand. + +# run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" ' +# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script +# a second time one month/season at a time with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" ' +# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot) so as not to overwrite the files which store the right cluster order. + +ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- T # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- T # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "simple" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.)
[You need all ClusterNames files] + # - 'impact' for all the impact plots of the regime with the highest impact + # - 'single.impact' to save the individual impact maps + # - 'single.psl' to save the individual psl map + # - 'single.fre' to save the individual fre map + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + +var.num <- 1 # if fields.name ='rean.name' and composition ='simple', choose a variable for the impact maps 1: sfcWind 2: tas + +mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot + +# Associate the regimes to the four clusters: +cluster2.name <- "NAO+" +cluster4.name <- "NAO-" +cluster3.name <- "Blocking" +cluster1.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
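+ # Note on the 'psl'/'fre' sheet layout above (inferred from the plotting code that follows, not from any external documentation): 'lead.months <- c(6:0,0)' yields 8 columns, one per lead month from 6 down to 0, plus a final column drawn from the reanalysis when n.map reaches 8. A quick sketch of the corresponding start months, e.g. for forecast.month = 1:
+ # sapply(c(6:0,0), function(lm) { s <- 1 - lm; if(s < 1) s + 12 else s }) # -> 7 8 9 10 11 12 1 1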
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available; if not, it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + print("Impact data available") + impact.data <- TRUE + } else { + print("Impact data not available; 'simple' compositions will have an empty space in the middle column") + impact.data <- FALSE + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + if(impact.data == TRUE && (composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data # for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + 
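+ # A compact equivalent of the frequency block above, kept as a commented sketch (it assumes only what the code above shows: my.cluster.array[[p]] is a days x years x members array of cluster numbers 1..4):
+ # freq.sim <- lapply(1:4, function(k) apply(my.cluster.array[[p]] == k, c(2,3), sum)) # days in regime k per year and member
+ # sd.freq.sim <- sapply(freq.sim, function(f) sd(as.vector(f))) / n.days.month # interannual variability as a relative frequency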
# compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){ + if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1] + j3o <- j3o + 1 + } + if 
(my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + 
if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, since they might have changed due to the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + 
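+ # Two compact equivalents of computations used above, kept as commented sketches (they assume only the objects already defined in this script):
+ # 1) mean persistence via run-length encoding, instead of the sequ1..sequ4 counters:
+ # spells <- rle(my.cluster.vector)
+ # persObs <- sapply(1:4, function(k) mean(spells$lengths[spells$values == k]))
+ # 2) the greedy cluster-regime matching (max1..max4 with the -999 masking) written as a loop:
+ # cc <- cluster.corr; assoc <- rep(NA, 4)
+ # for(step in 1:4){
+ #   m <- which(cc == max(cc), arr.ind = TRUE)[1, ] # best remaining (cluster, regime) pair
+ #   assoc[m[1]] <- m[2]; cc[m[1], ] <- -999; cc[, m[2]] <- -999
+ # }
+ # # assoc[i] is then the observed regime matched to simulated cluster i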
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close if on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + 
assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and 
Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," 
(",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + 
par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # the composition check here is redundant, because we are already inside an if with composition == "simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, 
use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, 
contours.lty="F1FF1F")) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition of the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Save the individual impact maps for a selected forecasted month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 
0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Save the individual psl maps for a selected forecasted month: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, 
map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + 
mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", 
cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + + print("Finished!") +} # close if on composition != "summary" + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (Winter) to 16 (Autumn): + season.ref <- 13 + + # set a second WR classification (i.e., without running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/13_with_Z500/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + cluster1.ref <- 3 + cluster2.ref <- 4 + cluster3.ref <- 1 + cluster4.ref <- 2 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + cluster1.name.ref <- cluster1.name.period[season.ref] + cluster2.name.ref <- cluster2.name.period[season.ref] + cluster3.name.ref <- cluster3.name.period[season.ref] + cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name.ref) + cluster2.ref <- which(orden == cluster2.name.ref) + cluster3.ref <- which(orden == cluster3.name.ref) + cluster4.ref <- which(orden == cluster4.name.ref) + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the anomalies of each monthly regime: + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), 
new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos 
+ 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"),width=1200,height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! 
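+ # 'my.taylor' is assumed to be a custom wrapper around plotrix::taylor.diagram()
+ # (plotrix is loaded at the top of this block) that also prints the month label
+ # given in 'my.text' next to each point; 'my.text' and 'text.cex' are not
+ # taylor.diagram() arguments. A minimal sketch of the underlying call, under that
+ # assumption:
+ #
+ #   taylor.diagram(as.vector(map.ref1), as.vector(map1), normalize=TRUE, add=add.mod,
+ #                  pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=FALSE, cex.axis=1.7,
+ #                  col="blue", main=main.mod)
+ #
+ # normalize=TRUE divides both standard deviations by the reference sd, so the twelve
+ # monthly points of each regime share the same radial scale.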
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod) + + # add the points from the second classification: + add.mod <- TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red") + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red") + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red") + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red") + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + +} # close if on composition == "taylor" + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # arrays storing the summary statistics in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to 
the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ (ratio of simulated to observed sd) + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + 
array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + # Spatial Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7] + array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6] + array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5] + array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4] + array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + 
mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + 
polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + + dev.off() + + 
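+ # In the '1table' layouts below, rows are forecast months (i) and columns are lead
+ # times (the x axis is labelled 6:0, so column j corresponds to lead time 6-j). The
+ # startdate index i2 is the forecast month shifted back by the lead time, wrapping
+ # around the year end. A sketch of the same arithmetic with a hypothetical helper
+ # (not used below), relying on R's modulo operator:
+ #
+ #   startdate.index <- function(forecast.month, lead) 1 + (forecast.month - 1 - lead) %% 12
+ #   # startdate.index(1, 2) returns 11: a January forecast at lead 2 uses the November startdate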
png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. 
summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Difference between S4 and ERA-Interim mean frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Difference between S4 and ERA-Interim mean frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j 
in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of persistence values: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side 
= 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + 
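+ # Note on the single-table panels here and below: rows are the forecast
+ # (target) month i and columns are the lead time 6-j (the x-axis labels run
+ # 6:0), so the startdate index i2 is the target month shifted back by the
+ # lead time, wrapping around the year. The ifelse() in each cell loop is
+ # equivalent to the modular form (a sketch, not used by the code):
+ #   i2 <- ((i - (6-j) - 1) %% 12) + 1
+ # e.g. target month i=2 (Feb) at lead time 4 maps to the October startdate (i2=10).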
par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + 
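+ # The combined summaries (here and above) split each (startdate p, lead time l)
+ # cell into four triangles, one per regime, following the cluster indices
+ # implied by the panel figures (1=NAO+ left, 2=Atlantic Ridge bottom,
+ # 3=NAO- right, 4=Blocking top). A minimal helper sketch of that geometry
+ # (not called below, which keeps the explicit polygon() calls):
+ draw.cell <- function(p, l, cols4){
+   polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1])    # left triangle
+   polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2])    # bottom triangle
+   polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3])    # right triangle
+   polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4])    # top triangle
+ }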
array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- 
trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + 
axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + # this four-panel variant previously reused the _1table.png name of the three-panel figure below, which silently overwrote it; renamed so that both outputs are kept: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table_4panels.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text 
= "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) 
> 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300)
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){
+  # placeholder: intended to wrap the repeated panel-drawing code above, but not implemented yet
+}
+
+ ## # FairRPSS summary (note: the commented-out draft below actually plots the frequency difference, not the FairRPSS):
+ ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400)
+
+ ## # create an array similar to array.diff.freq but with colors instead of frequencies:
+ ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+ ## array.freq.colors <- array(freq.cols[8],c(12,7,4))
+ ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7]
+ ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6]
+ ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5]
+ ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4]
+ ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3]
+ ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2]
+ ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1]
+
+ ## par(mar=c(5,4,4,4.5))
+ ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5)
+ ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+ ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ ## axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+ ## for(p in 1:12){
+ ## for(l in 0:6){
+ ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2])
+ ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3])
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4])
+ ## }
+ ## }
+
+ ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+ ## dev.off()
+
+} # close if on composition == "summary"
+
+
+
+
+# impact map of the WR with the strongest impact:
+if(composition == "impact" && fields.name == rean.name){
+
+ var.num <- 2 # choose a variable (1:sfcWind, 2:tas)
+
+ png(filename=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650)
+
+ plot.new()
+ #par(mfrow=c(4,3))
+ #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+ col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData"))
+ load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # note: the cluster names should not depend on var.name
+
+ # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ lon.max = 45
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+ cluster2 <- which(orden == cluster2.name.period[p])
+ cluster3 <- which(orden == cluster3.name.period[p])
+ cluster4 <- which(orden == cluster4.name.period[p])
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ ## imp.all <- imp1
+ ## for(i in 1:dim(imp1)[1]){
+ ## for(j in 1:dim(imp1)[2]){
+ ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+ ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+ ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+ ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+ ## }
+ ## }
+
+ ## #most influential WT with positive impact:
+ ## imp.all <- imp1
+ ## for(i in 1:dim(imp1)[1]){
+ ## for(j in 1:dim(imp1)[2]){
+ ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ ## }
+ ## }
+
+ # most influential WT with negative impact:
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+
+ if(p<=3) yMod <- 0.7
+ if(p>=4 && p<=6) yMod <- 0.5
+ if(p>=7 && p<=9) yMod <- 0.3
+ if(p>=10) yMod <- 0.1
+ if(p==13 || p==14) yMod <- 0.52
+ if(p==15 || p==16) yMod <- 0.1
+
+ if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+ if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+ if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+ if(p==13 || p==15) xMod <- 0.05
+ if(p==14 || p==16) xMod <- 0.50
+
+ # impact map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+ if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+ PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+ # title map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+ if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+ mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
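+ # The nested argmin loop above can be cross-checked with a vectorised sketch
+ # (base R only; 'imp.stack' and 'imp.all.check' are hypothetical names; on exact
+ # ties which.min keeps the first regime, whereas the if() sequence above keeps the last):
+ # imp.stack <- array(c(imp1, imp2, imp3, imp4), dim = c(dim(imp1), 4))
+ # imp.all.check <- apply(imp.stack, c(1,2), which.min) + 0.5 # 1.5..4.5, as used by PlotEquiMap with brks=1:5
+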
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v18.R~ b/old/weather_regimes_maps_v18.R~
new file mode 100644
index 0000000000000000000000000000000000000000..7254af8b9e9179ac17afb4484d0137742e21629b
--- /dev/null
+++ b/old/weather_regimes_maps_v18.R~
@@ -0,0 +1,3876 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData"
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/Downloads/scripts/weather_regimes_maps_v18.R (for example)
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering'
+# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set
+# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis
+# to the forecast system, employing it to automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'):
+fields.name <- forecast.name #rean.name #forecast.name
+
+# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12 # You need to have previously created the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period.
+
+# run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" '
+# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script
+# a second time, one month/season at a time, with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" '
+# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set:
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot) so as not to overwrite the files which store the right cluster order.
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "simple" # choose which kind of composition you want to plot:
+ # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+ # - 'psl' for all the regime anomalies for a fixed forecast month
+ # - 'fre' for all the interannual frequencies for a fixed forecast month
+ # - 'summary' for the summary plots of all forecast months (persistence bias, freq. bias, correlations, etc.) [You need all ClusterNames files]
+ # - 'impact' for all the impact plots of the regime with the highest impact
+ # - 'single.impact' to save the individual impact maps
+ # - 'single.psl' to save the individual psl map
+ # - 'single.fre' to save the individual fre map
+ # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+ # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot
+
+# Associate the regimes to the four clusters:
+cluster2.name <- "NAO+"
+cluster4.name <- "NAO-"
+cluster3.name <- "Blocking"
+cluster1.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+ # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+work.dir.orig <- work.dir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+n.map <- 0
+
+if(composition != "summary" && composition != "impact" && composition != "taylor"){
+
+ if(composition == "psl" || composition == "fre") {
+ if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+ if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+ lead.months <- c(6:0,0)
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+ if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+ }
+
+ if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
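+ # Worked example of the start-month / lead-month arithmetic used inside the loop
+ # below (values are illustrative): to plot forecast.month = 1 (January) at
+ # lead.month = 3, the start month is 1 - 3 = -2, which wraps around to -2 + 12 = 10,
+ # i.e. the October start dates.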
+
+ for(lead.month in lead.months){
+ #lead.month=lead.months[1] # for the debug
+
+ if(composition == "simple"){
+ if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+ if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+ }
+
+ if(composition == 'psl' || composition == 'fre') {
+ period <- forecast.month - lead.month
+ if(period < 1) period <- period + 12
+ }
+
+ for(p in period){
+ #p <- period[1] # for the debug
+ p.orig <- p
+
+ # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+ if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) {
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+ # load impact data only if it is available; if not, it will leave an empty space in the composition:
+ if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ print("Impact data available")
+ impact.data <- TRUE
+ } else {
+ print("Impact data not available; 'simple' compositions will have an empty space in the middle column")
+ impact.data <- FALSE
+ }
+ }
+
+ if(fields.name == forecast.name && n.map < 7){
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+ if(impact.data == TRUE && (composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ }
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ if(fields.name == forecast.name && n.map < 7){
+
+ # compute the simulated interannual monthly variability:
+ n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
+ freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+ freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+ freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+ freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+ sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+ sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+ sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+ sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
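+ # Quick self-contained check of the counting idiom above (a sketch on toy data
+ # with hypothetical dimensions days x years x members; 'toy' and 'freq1' are
+ # illustrative names only):
+ # toy <- array(sample(1:4, 30*35*15, replace = TRUE), dim = c(30, 35, 15))
+ # freq1 <- apply(toy == 1, c(2,3), sum) # 35 x 15 matrix of cluster-1 day counts
+ # stopifnot(identical(dim(freq1), c(35L, 15L)), all(freq1 <= 30))
+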
# compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ...
+
+ sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+ i1 <- i2 <- i3 <- i4 <- 1
+
+ if(my.cluster.vector[1] != 1) i1 <- 0
+ if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[1] != 2) i2 <- 0
+ if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[1] != 3) i3 <- 0
+ if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[1] != 4) i4 <- 0
+ if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+ n.dd <- length(my.cluster.vector)
+
+ for(d in 1:(n.dd-1)){
+ if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+ if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+ if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+ if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+ if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+ if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+ if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+ if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+ }
+
+ if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ pers1 <- mean(sequ1[which(sequ1 != 0)])
+ pers2 <- mean(sequ2[which(sequ2 != 0)])
+ pers3 <- mean(sequ3[which(sequ3 != 0)])
+ pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+ # compute correlations between simulated clusters and observed regime anomalies:
+ cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+ lat.forecast <- lat; lon.forecast <- lon
+
+ if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig,period)) period <- period.orig
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ # compute the observed transition probabilities:
+ n.days.month <- n.days.in.a.period(load.month,2001)
+ j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+ for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){
+ if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j1o <- j1o + 1
+ }
+ if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j2o <- j2o + 1
+ }
+ if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j3o <- j3o + 1
+ }
+ if (my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){
+ transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1]
+ j4o <- j4o +1
+ }
+
+ }
+
+ trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+ trans.obs[1,2] <- length(which(transition.obs1 == 2))
+ trans.obs[1,3] <- length(which(transition.obs1 == 3))
+ trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+ trans.obs[2,1] <- length(which(transition.obs2 == 1))
+ trans.obs[2,3] <- length(which(transition.obs2 == 3))
+ trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+ trans.obs[3,1] <- length(which(transition.obs3 == 1))
+ trans.obs[3,2] <- length(which(transition.obs3 == 2))
+ trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+ trans.obs[4,1] <- length(which(transition.obs4 == 1))
+ trans.obs[4,2] <- length(which(transition.obs4 == 2))
+ trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+ trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+ for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+ # compute the observed interannual monthly variability:
+ freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+ freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+ freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+ freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+ }
+
+ sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+ sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+ sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+ sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+ wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+ if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+ if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+ if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+ cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+ cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+ cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+ cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+ cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+ cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+   cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+   cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+   cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+   cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+   cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+   cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+   cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+   cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+   cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+   # associate each simulated cluster to one observed regime:
+   max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+   cluster.corr2 <- cluster.corr
+   cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+   cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+   max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+   cluster.corr3 <- cluster.corr2
+   cluster.corr3[max2[1],] <- -999
+   cluster.corr3[,max2[2]] <- -999
+   max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+   cluster.corr4 <- cluster.corr3
+   cluster.corr4[max3[1],] <- -999
+   cluster.corr4[,max3[2]] <- -999
+   max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+   seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+   seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+   cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+   cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+   cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+   cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+   cluster1.name <- regimes.obs[cluster1.regime]
+   cluster2.name <- regimes.obs[cluster2.regime]
+   cluster3.name <- regimes.obs[cluster3.regime]
+   cluster4.name <- regimes.obs[cluster4.regime]
+
+   regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+   cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs))
+
+   spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
+   spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+   spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+   spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+   # measure persistence for the reanalysis dataset:
+   my.cluster.vector <- my.cluster[[load.month]][[1]]
+
+   sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+   i1 <- i2 <- i3 <- i4 <- 1
+
+   if(my.cluster.vector[1] != 1) i1 <- 0
+   if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+   if(my.cluster.vector[1] != 2) i2 <- 0
+   if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+   if(my.cluster.vector[1] != 3) i3 <- 0
+   if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+   if(my.cluster.vector[1] != 4) i4 <- 0
+   if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+   n.dd <- length(my.cluster.vector)
+   for(d in 1:(n.dd-1)){
+     if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+     if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+     if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+     if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+     if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+     if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+     if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+     if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+   }
+
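+   # --- Illustrative sketch (hedged, not executed): together with the final-day
+   # update just below, the spell bookkeeping above computes the mean length of
+   # the consecutive runs of each cluster. Assuming 'my.cluster.vector' as defined
+   # above, base R's run-length encoding gives the same means more compactly:
+   if(FALSE){
+     runs <- rle(my.cluster.vector)   # lengths and values of the consecutive spells
+     persObs.check <- sapply(1:4, function(k) mean(runs$lengths[runs$values == k]))
+     # persObs.check[k] should reproduce persObs1..persObs4 computed below
+   }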
+   if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+   if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+   if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+   if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+   persObs1 <- mean(sequ1[which(sequ1 != 0)])
+   persObs2 <- mean(sequ2[which(sequ2 != 0)])
+   persObs3 <- mean(sequ3[which(sequ3 != 0)])
+   persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+   # insert here all the manual corrections to the regime assignment, due to misassignments in the automatic selection:
+   # forecast month: September
+   if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+   if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+   if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"}
+   if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+   # forecast month: October
+   if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+   if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+   if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+   if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+   if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+   # forecast month: November
+   if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+   if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+   if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+   if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+   if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+   regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # simulated regimes after the manual correction
+   #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference
+
+   order.sim <- match(regimes.sim2, orden)
+   order.obs <- match(regimes.obs, orden)
+
+   clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+   clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+   # recompute the spatial correlations, because the manual corrections above might have changed the cluster-regime pairing:
+   spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+   spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+   spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+   spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
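+   # --- Illustrative sketch (hedged, not executed): the max1..max4 search further
+   # above is a greedy one-to-one assignment of simulated clusters to observed
+   # regimes. Assuming the same 4x4 'cluster.corr' matrix, an equivalent compact loop:
+   if(FALSE){
+     remaining <- cluster.corr
+     assignment <- integer(4)           # assignment[simulated cluster] -> observed regime
+     for(k in 1:4){
+       m <- which(remaining == max(remaining), arr.ind=TRUE)[1,]
+       assignment[m[1]] <- m[2]
+       remaining[m[1],] <- -999         # mask the matched row ...
+       remaining[,m[2]] <- -999         # ... and the matched column
+     }
+     # assignment should equal c(cluster1.regime, cluster2.regime, cluster3.regime, cluster4.regime)
+   }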
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + 
assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and 
+ #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+ #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+ #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR
+ if(var.name[var.num] == "sfcWind") {
+   my.brks.var <- seq(-3,3,0.5)
+   values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+ }
+ if(var.name[var.num] == "tas") {
+   my.brks.var <- seq(-5,5,1)
+   values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+ }
+
+ #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+ my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+ #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+ freq.max <- 100
+ if(fields.name == forecast.name){
+   pos.years.present <- match(year.start:year.end, years)
+   years.missing <- which(is.na(pos.years.present))
+   fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c()
+   k <- 0
+   for(y in year.start:year.end) {
+     if(y %in% (years.missing + year.start - 1)) {
+       fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA)
+       freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA)
+       freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA)
+       k <- k + 1
+     } else {
+       fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k])
+       fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k])
+       freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k])
+       freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k])
+       freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k])
+       freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k])
+     }
+   }
+
+   sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
+   sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2)
+   sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+   sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+
+ } else {
+   fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4
+ }
+
+
+ # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series:
+ if(composition == "simple"){
+   if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700)
+   if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700)
+
+   plot.new()
+
+   # Sheet title:
+   par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+   if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""}
+   mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2)
(",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + 
+   par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+   mtext("year", cex=3)
+
+   # Title centroid maps:
+   title1.xpos <- 0.02
+   title1.width <- 0.4
+   par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+   mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+   par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+   mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+   par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+   mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+   par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+   mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+   # Impact maps:
+   if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # the 'impact' branch is never reached here, because we are already inside the composition == "simple" block
+     impact.xpos <- 0.47
+     impact.width <- 0.26
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+     PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+     PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+     PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+     par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+     PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+     # Title impact maps:
+     title2.xpos <- 0.42
+     title2.width <- 0.35
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE)
+     mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE)
+     mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE)
+     mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+     par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE)
+     mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+   }
+
+   # Frequency plots:
+   barplot.xpos <- 0.75
+   barplot.width <- 0.25
+
+   par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE)
+   if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+   if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+   par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.510, 0.680), new=TRUE)
+   if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+   if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+   par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.275, 0.445), new=TRUE)
+   if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+   if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+   par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.040, 0.210), new=TRUE)
+   if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+   if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+   # Title frequency plots:
+   title3.xpos <- 0.75
+   title3.width <- 0.25
+   par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE)
+   mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression!
+   par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE)
+   mtext(paste0(regime2.name, " Frequency"), font=2, cex=4)
+   par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE)
+   mtext(paste0(regime3.name, " Frequency"), font=2, cex=4)
+   par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE)
+   mtext(paste0(regime4.name, " Frequency"), font=2, cex=4)
+
+   # % on Frequency plots:
+   symbol.xpos <- 0.735
+   symbol.width <- 0.01
+   par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE)
+   mtext("%", cex=3.3)
+   par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE)
+   mtext("%", cex=3.3)
+   par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE)
+   mtext("%", cex=3.3)
+   par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE)
+   mtext("%", cex=3.3)
+
+   # mean frequency on Frequency plots:
+   if(mean.freq == TRUE){
+     symbol.xpos <- 0.9
+     symbol.width <- 0.1
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE)
+     mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5)
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE)
+     mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5)
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE)
+     mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5)
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE)
+     mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5)
+   }
+
+   # add the correlation between the obs. time series and the ensemble mean time series on the Frequency plots:
+   if(fields.name == forecast.name){
+     symbol.xpos <- 0.76
+     symbol.width <- 0.1
+     par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE)
+     sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, 
contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 
+   dev.off()
+
+   png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+   PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+   dev.off()
+ }
+
+ # Save each regime anomaly map in its own file:
+ if(composition == "single.psl"){
+   png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+   PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2)))
+   dev.off()
+
+   png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300)
+   PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2)))
+   dev.off()
+
+   png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300)
+   PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2)))
+   dev.off()
+
+   png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300)
+   PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2)))
+   dev.off()
+
+   # Legend centroid maps:
+   #my.subset <- match(values.to.plot, my.brks)
+   #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+   #mtext(side=4," hPa", cex=legend1.cex)
+
+ }
+
+ # Visualize the composition with the interannual frequencies for a selected forecast month:
+ if(composition == "fre"){
+   # Frequency plots:
+   n.map <- n.map + 1 # n.map starts from 0
+   map.width <- 0.115
+   map.xpos <- 0.06 + map.width * (n.map - 1)
+   map.ypos <- 0.08
+   map.height <- 0.19
+   map.ysep <- 0.015
+
+   par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+   if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+   if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+   par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+   if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+   if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+   par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+   if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+   if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+   par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+   if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+   if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+   # Sheet subtitles:
+   par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+   if(n.map < 8) {
+     mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+   } else {
+     mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+   }
+
+   # % on Frequency plots:
+   par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+   mtext("%", cex=3.3)
+   par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+   mtext("%", cex=3.3)
+   par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE)
+   mtext("%", cex=3.3)
+   par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE)
+   mtext("%", cex=3.3)
+
+   # mean frequency on Frequency plots:
+   par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE)
+   mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5)
+   par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE)
+   mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5)
+   par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE)
+   mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5)
+   par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE)
+   mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5)
+
+   # add the correlation between the obs. time series and the ensemble mean time series on the Frequency plots:
+   if(n.map < 8){
+     par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE)
mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", 
+   }
+
+   dev.off()
+ } # close if on composition
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+if(composition == "taylor"){
+  library("plotrix")
+
+  # choose a reference season from 13 (winter) to 16 (autumn):
+  season.ref <- 13
+
+  # set a second WR classification (e.g. one without the running cluster) to contrast with the new one:
+  #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+  rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+
+  # load data of the reference season of the first classification (this line should be modified to load the chosen season):
+  load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/13_with_Z500/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+  cluster1.ref <- 3
+  cluster2.ref <- 4
+  cluster3.ref <- 1
+  cluster4.ref <- 2
+
+  assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+  assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+  assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+  assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+  # load data of the reference season of the second classification (this line should be modified to load the chosen season):
+  load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+  load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_ClusterNames.RData") # load clusterX.name.period
+
+  if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+  if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+  cluster1.name.ref <- cluster1.name.period[season.ref]
+  cluster2.name.ref <- cluster2.name.period[season.ref]
+  cluster3.name.ref <- cluster3.name.period[season.ref]
+  cluster4.name.ref <- cluster4.name.period[season.ref]
+
+  cluster1.ref <- which(orden == cluster1.name.ref)
+  cluster2.ref <- which(orden == cluster2.name.ref)
+  cluster3.ref <- which(orden == cluster3.name.ref)
+  cluster4.ref <- which(orden == cluster4.name.ref)
+
+  assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+  assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+  assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+  assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+
+  # breaks and colors of the geopotential fields:
+  if(psl == "g500"){
+    my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+    my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+    values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+  }
+
+  if(psl == "psl"){
+    my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+    my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+    values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+  }
+
+  my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+  my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+  # plot the composition with the regime anomalies of the monthly regimes:
+  png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000)
+
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+  mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+  n.map <- 0
+  for(p in c(9:12,1:8)){
+    p.orig <- p
+
+    load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+    load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(p != p.orig) p <- p.orig
+
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+
+    assign(paste0("map",cluster1), pslwr1mean) # NAO+
+    assign(paste0("map",cluster2), pslwr2mean) # NAO-
+    assign(paste0("map",cluster3), pslwr3mean) # Blocking
+    assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+    n.map <- n.map + 1 # n.map starts from 0
+    map.width <- 0.07
+    map.xpos <- 0.06 + map.width * (n.map - 1)
+    map.ypos <- 0.08
+    map.height <- 0.19
+    map.ysep <- 0.015
+
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+    # Sheet subtitles:
+    par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+    mtext(my.month.short[p], cex=5.5, font=2)
+
+  }
+
+  # draw the last map at the right of the composition, the one with the seasonal anomalies:
+  n.map <- n.map + 1 # n.map starts from 0
+  map.width <- 0.07
+  map.xpos <- 0.06 + map.width * (n.map - 1)
+  map.ypos <- 0.08
+  map.height <- 0.19
+  map.ysep <- 0.015
+
+  par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+  PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+  PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+  PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+  PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+  # subtitle:
+  par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+  mtext(my.period[season.ref], cex=5.5, font=2)
+
+  dev.off()
+
+  # Plot the Taylor diagrams:
+  for(regime in 1:4){
+
+    png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"),width=1200,height=800)
+
+    for(p in 1:12){
+      p.orig <- p
+
+      load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+      load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      cluster1 <- which(orden == cluster1.name)
+      cluster2 <- which(orden == cluster2.name)
+      cluster3 <- which(orden == cluster3.name)
+      cluster4 <- which(orden == cluster4.name)
+
+      assign(paste0("map",cluster1), pslwr1mean) # NAO+
+      assign(paste0("map",cluster2), pslwr2mean) # NAO-
+      assign(paste0("map",cluster3), pslwr3mean) # Blocking
+      assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+      # load data from the second classification:
+      load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+      load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      cluster1.old <- which(orden == cluster1.name)
+      cluster2.old <- which(orden == cluster2.name)
+      cluster3.old <- which(orden == cluster3.name)
+      cluster4.old <- which(orden == cluster4.name)
+
+      assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+      assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+      assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+      assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+      add.mod <- ifelse(p == 1, FALSE, TRUE)
+      main.mod <- ifelse(p == 1, orden[regime],"")
+
+      sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work!
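+      # --- Illustrative sketch (hedged, not executed): 'my.taylor' is a local wrapper;
+      # the overlay pattern used just below (the first call draws the diagram, add=TRUE
+      # calls overlay points) can also be obtained with plotrix::taylor.diagram,
+      # whose 'normalize', 'pos.cor', 'sd.arcs', 'ref.sd' and 'add' arguments are
+      # used in the same way (it has no 'my.text' label argument, though):
+      if(FALSE){
+        taylor.diagram(as.vector(map.ref1), as.vector(map1), normalize=TRUE,
+                       pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=FALSE, col="blue")
+        taylor.diagram(as.vector(map.ref1), as.vector(map.old1), add=TRUE,
+                       normalize=TRUE, col="red")   # overlay the second classification
+      }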
+      if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod)
+      if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod)
+      if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod)
+      if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="blue", main=main.mod)
+
+      # add the points from the second classification:
+      add.mod <- TRUE
+      if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red")
+      if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red")
+      if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red")
+      if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col="red")
+    } # close for on 'p'
+
+    dev.off()
+
+  } # close for on 'regime'
+
+
+} # close if on composition == "taylor"
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+  # arrays storing the skill measures in the format: [startdate, leadmonth, regime]
+  array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+  array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+  array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4))
+
+  for(p in 1:12){
+    p.orig <- p
+
+    for(l in 0:6){
+
+      load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+      array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+      array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+      array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+      array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+      array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO+ to NAO+ + 
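+ # NOTE: the seven threshold lines above bin the values into the 8 colour classes
+ # delimited by my.seq. A more compact equivalent (a sketch only, not used below;
+ # 'bin.colors' is a hypothetical helper name) could rely on findInterval(), which
+ # returns the index of the bin each value falls in, with all.inside=TRUE clamping
+ # out-of-range values to the first/last bin:
+ #
+ # bin.colors <- function(x, brks, cols, na.col="white") {
+ #   res <- array(cols[findInterval(x, brks, all.inside=TRUE)], dim=dim(x))
+ #   res[is.na(x)] <- na.col  # e.g. the NA self-transitions used further below
+ #   res
+ # }
+ # array.sp.cor.colors <- bin.colors(array.sp.cor, my.seq, sp.corr.cols)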
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
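+ # each (startdate, lead time) cell is split into 4 triangles, one per regime,
+ # with the same regime-to-triangle mapping used when filling the arrays above:
+ # left = NAO+ (1), bottom = Atl.Ridge (2), right = NAO- (3), top = Blocking (4)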
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
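+ # in the '1table' layout below, rows are the forecasted (target) month instead of
+ # the startdate: for target month i and column j (lead time 6-j, x axis labelled 6:0),
+ # i2 = i-(6-j), wrapped into 1:12, is the startdate whose lead-(6-j) forecast verifies
+ # in month i; e.g. i=1 (January) and j=4 (lead 2) give i2 = 1-2+12 = 11 (November startdate)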
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr. values:
+ corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+ array.cor.colors <- array(corr.cols[8],c(12,7,4))
+ array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+ array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+ array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+ array.cor.colors[array.cor < 0] <- corr.cols[4]
+ array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+ array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+ array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+
+ dev.off()
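+ # NOTE: the four panels above (and in the analogous '4tables' figures below) differ
+ # only in the fig= coordinates, the panel title and the regime index; a hypothetical
+ # helper like the sketch below could factor out the repetition (not used here):
+ #
+ # draw.panel <- function(col.array, regime, panel.title, fig.title, fig.plot) {
+ #   par(mar=c(0,0,4,0), fig=fig.title, new=TRUE)
+ #   mtext(panel.title, cex=1.5)
+ #   par(mar=c(0,4,1,0), fig=fig.plot, new=TRUE)
+ #   plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ #   mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ #   mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ #   axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ #   axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ #   for(p in 1:12) for(l in 0:6) polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=col.array[p,1+l,regime])
+ # }
+ # draw.panel(array.cor.colors, 1, "NAO+", c(0, 0.45, 0.85, 0.98), c(0, 0.45, 0.6, 0.9))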
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+
+ dev.off()
+
+ # Freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400)
+
+ # create an array similar to array.freq but with colors instead of frequencies:
+ freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(NA,20,22,24,26,28,30,32,NA)
+
+ array.freq.colors <- array(freq.cols[8],c(12,7,4))
+ array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7]
+ array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6]
+ array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5]
+ array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4]
+ array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3]
+ array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2]
+ array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Mean S4 frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean S4 frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j 
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # Persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+ # create an array similar to array.pers but with colors instead of persistence values:
+ pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+ array.pers.colors <- array(pers.cols[8],c(12,7,4))
+ array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+ array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+ array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+ array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+ array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+ array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+ array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("S4 Regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # Delta persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400)
+
+ # create an array similar to array.diff.pers but with colors instead of persistence differences:
+ diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA)
+
+ array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4))
+ array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7]
+ array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6]
+ array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5]
+ array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4]
+ array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3]
+ array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2]
+ array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # St.Dev.freq ratio summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.sd.freq but with colors instead of ratio values:
+ diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
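+ # the plotted quantity is sd.freq.sim / sd.freq.obs: a ratio above 1 means the S4
+ # forecasts show more interannual variability of the regime frequency than
+ # ERA-Interim, a ratio below 1 means they show less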
+
+ array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- 
trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + 
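# layout note for these summary figures: in the single-figure version above, each + 
# startdate/lead-time cell is split into four triangles, coloured from the 3rd + 
# index of the colour array: left = 1 (NAO+), bottom = 2 (Atlantic Ridge), + 
# right = 3 (NAO-), top = 4 (Blocking) -- the same index-to-regime mapping used by + 
# the panel titles of this "_4tables" figure + 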
axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + # NOTE: this four-column figure originally wrote to "_1table.png", the same file as the three-column figure further below, which silently overwrote it; renamed here so both outputs are kept + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table_4cols.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text 
= "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
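# colour convention for these bias tables: diverging blue-to-red classes over the + 
# breaks c(-20,-10,-5,-2,0,2,5,10,20), narrowing towards zero; assuming the + 
# array.trans.diffX arrays hold simulated-minus-observed transition probabilities + 
# (their name and the +/-20 scale suggest so), blue marks transitions the model + 
# makes less often than the reanalysis and red more often; e.g. a bias of +3 falls + 
# in [2,5) and is drawn in trans.cols[6] ('#f4a582') + 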
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
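# note: the four panels of every "_4tables" figure repeat this same block, with + 
# only the fig= box, the panel title and the 3rd array index changing; the empty + 
# diagnostic.WR() stub near the end of this section (see the sketch that follows + 
# it) presumably aims at reducing each panel to a single call such as + 
## diagnostic.WR("NAO-", c(0.45, 0.9, 0.6, 0.9), array.trans.diff3.colors[,,3]) + 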
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) 
> 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + 
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + # empty placeholder: not implemented yet +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
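# Illustrative sketch (the 'toy.*' objects are hypothetical and not used by this script): the
# if-chains below pick, at every grid point, the regime whose impact is most negative. A vectorized
# equivalent stacks the four impact fields along a third dimension and applies which.min cell by cell:
toy.imp <- simplify2array(list(matrix(c(-1, 2, 0, 1), 2),   # toy impact field of regime 1
                               matrix(c( 0,-3, 1, 2), 2),   # regime 2
                               matrix(c( 2, 1,-2, 0), 2),   # regime 3
                               matrix(c( 1, 0, 2,-1), 2)))  # regime 4
toy.most.neg <- apply(toy.imp, c(1,2), which.min) + 0.5     # codes 1.5-4.5, matching the breaks 1:5 used below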
+ + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max <- 45 # eastern longitude limit (degrees E) of the European window + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + ## # most influential WT with positive impact: + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + # most influential WT with negative impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + 
par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact" + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v19.R b/old/weather_regimes_maps_v19.R new file mode 100644 index 0000000000000000000000000000000000000000..4d0d1109d5c1a242618d15645318bc302a5a2a8f --- /dev/null +++ b/old/weather_regimes_maps_v19.R @@ -0,0 +1,3924 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/Downloads/scripts/weather_regimes_maps_v19.R (for example) +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below) +# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, one can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! 
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +library(RColorBrewer) # for brewer.pal(), used to build the color scales below +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- forecast.name #rean.name #forecast.name + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 1:12 # You need to have created the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period beforehand. + +# run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" ' +# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script +# a second time one month/season at a time with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" ' +# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order. + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "simple" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) 
[You need all ClusterNames files] + # - 'impact' for all the impact plots of the regime with the highest impact + # - 'single.impact' to save the individual impact maps + # - 'single.psl' to save the individual psl maps + # - 'single.fre' to save the individual fre maps + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + +var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- TRUE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot + +# Associate the regimes to the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! 
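# Illustrative sketch (hypothetical helper, not used elsewhere in this script): when composition is
# 'psl' or 'fre', the start month scanned in the loop below is the forecast month minus the lead
# time, wrapped back into the range 1-12:
start.month.of <- function(forecast.month, lead.month) {
  m <- forecast.month - lead.month
  if(m < 1) m <- m + 12
  m
}
# e.g. start.month.of(1, 3) is 10: a forecast of January at 3 months lead starts in October.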
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month 'p' and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available, if not it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + print("Impact data available") + impact.data <- TRUE + } else { + print("Impact data not available; 'simple' compositions will have an empty space in the middle column") + impact.data <- FALSE + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + if(impact.data == TRUE && (composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data, for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + 
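# Illustrative sketch with toy data (the 'toy.*' objects are hypothetical and not used by the
# script): the per-year counting done above, and the transition tabulation done in the loops below,
# reduce to a few vectorized lines on a small matrix of daily cluster labels:
toy.cl <- matrix(c(1,1,2,3,3, 2,2,4,4,1, 3,1,1,2,2), nrow=5, ncol=3)  # 5 days x 3 years
toy.freq1 <- apply(toy.cl == 1, 2, sum)    # days spent in cluster 1, for each year
toy.sd1 <- sd(toy.freq1) / nrow(toy.cl)    # interannual sd as a fraction of the month length
toy.from <- head(toy.cl[,1], -1); toy.to <- tail(toy.cl[,1], -1)  # consecutive day pairs of year 1
toy.keep <- toy.from != toy.to             # self-transitions are excluded, as in the loops below
toy.trans <- prop.table(table(factor(toy.from[toy.keep], 1:4),
                              factor(toy.to[toy.keep], 1:4)), margin=1)  # row-normalized 4x4 matrix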
# compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){ + if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1] + j3o <- j3o + 1 + } + if 
(my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) 
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + 
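# Illustrative sketch (toy objects only, not used by the script): the spell-length bookkeeping in
# this block is equivalent to run-length encoding the daily cluster series; the mean persistence of
# cluster k is then the mean length of the runs whose value is k:
toy.series <- c(1,1,2,2,2,1,4,4,3)
toy.runs <- rle(toy.series)
toy.pers1 <- mean(toy.runs$lengths[toy.runs$values == 1])  # runs of cluster 1 have lengths 2 and 1, so 1.5 days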
if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual corrections + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, since they might have changed after the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + 
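# Illustrative sketch (toy objects only, not used by the script) of the greedy one-to-one
# association used above: repeatedly take the largest correlation, then mask its row and column
# with -999, so that each simulated cluster is paired with a distinct observed regime:
toy.corr <- matrix(c(0.9,0.2,0.1,0.0, 0.3,0.8,0.2,0.1, 0.1,0.4,0.7,0.2, 0.0,0.1,0.3,0.6), nrow=4, byrow=TRUE)
toy.pair <- rep(NA, 4)   # observed regime index assigned to each simulated cluster
toy.work <- toy.corr
for(k in 1:4){
  mx <- which(toy.work == max(toy.work), arr.ind=TRUE)[1,]
  toy.pair[mx[1]] <- mx[2]
  toy.work[mx[1],] <- -999  # this simulated cluster has been assigned
  toy.work[,mx[2]] <- -999  # this observed regime has been assigned
}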
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + 
assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and 
Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," 
(",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + 
par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # NOTE: the composition == "impact" alternative can never be TRUE here, because we are already inside an if with composition == "simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, 
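+
+ # Note on the bquote() calls above: base-R plotmath cannot evaluate paste0()
+ # inside an expression (see the comment next to the frequency titles), so
+ # bquote() is used instead: anything wrapped in .( ) is evaluated first and
+ # spliced into the expression, while bar(nu) stays as plotmath (nu with an
+ # overbar). A commented-out sketch with a hypothetical value:
+ # nu.mean <- 12.3                                    # hypothetical mean frequency (%)
+ # mtext(bquote(bar(nu) == .(paste0(nu.mean, "%"))))  # renders as: nu-bar = 12.3%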
use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, 
contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 
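+
+ # Note on rescale(): it is defined in the sourced Rfunctions.R, not in this file.
+ # From the way it is called — rescale(x, my.brks[1], tail(my.brks,1)) before passing
+ # x to PlotEquiMap2() with breaks my.brks — it presumably clamps the field into the
+ # range of the colour breaks, so out-of-range anomalies are drawn with the extreme
+ # colours instead of falling outside the palette. A hypothetical equivalent,
+ # commented out (an assumption, not the actual definition):
+ # rescale.sketch <- function(x, lo, hi) pmin(pmax(x, lo), hi)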
+
+ # Visualize the impact maps for a selected forecast month, one PNG per regime:
+ if(composition == "single.impact"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+ }
+
+ # Visualize the regime anomaly maps for a selected forecast month, one PNG per cluster:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2)))
+ dev.off()
+
+ # Legend centroid maps:
+ #my.subset <- match(values.to.plot, my.brks)
+ #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ #mtext(side=4," hPa", cex=legend1.cex)
+
+ }
+
+ # Visualize the composition with the interannual frequencies for a selected forecast month:
+ if(composition == "fre"){
+ # Frequency barplots:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ # % on Frequency plots:
+ par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE)
+ mtext("%", cex=3.3)
+
+ # mean frequency on Frequency plots:
+ par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE)
+ mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5)
+ par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE)
+ mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5)
+ par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE)
+ mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5)
+ par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE)
+ mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5)
+
+ # add corr between obs. time series and ensemble mean time series on Frequency plots:
+ if(n.map < 8){
+ par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE)
+ mtext(paste0("r= ",sp.cor1), cex=3.5)
+ par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE)
+ mtext(paste0("r= ",sp.cor2), cex=3.5)
+ par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE)
+ mtext(paste0("r= ",sp.cor3), cex=3.5)
+ par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE)
+ mtext(paste0("r= ",sp.cor4), cex=3.5)
+ }
+ } # close if on composition == 'fre'
+
+ # save indicators for each startdate and forecast time:
+ # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector)
+ if(fields.name == forecast.name) {
+ if (composition != "impact") {
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ } else {
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ }
+ }
+
+ print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1, spat.cor2, spat.cor3, spat.cor4
+ } # close 'p' on 'period'
+
+ if(as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+ } # close 'lead.month' on 'lead.months'
+
+ if(composition == "psl" || composition == "fre"){
+ # Regime names:
+ title1.xpos <- 0.01
+ title1.width <- 0.04
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+ mtext(paste0(regime1.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+ mtext(paste0(regime2.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE)
+ mtext(paste0(regime3.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE)
+ mtext(paste0(regime4.name), font=2, cex=5)
+
+ if(composition == "psl") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ mtext("hPa", cex=legend1.cex*1.3)
+ }
+
+ dev.off()
+ } # close if on composition
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+
+if(composition == "taylor"){
+ library("plotrix")
+
+ # choose a reference season from 13 (winter) to 16 (autumn):
+ season.ref <- 13
+
+ # set the first WR classification:
+ rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+ # load data of the reference season of the first classification (this line should be modified to load the chosen season):
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ #cluster1.ref <- cluster1.name.period[13]
+ #cluster2.ref <- cluster2.name.period[13]
+ #cluster3.ref <- cluster3.name.period[13]
+ #cluster4.ref <- cluster4.name.period[13]
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and Atlantic regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
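+
+ # Note on the reordering above: the RData files store the four centroids as
+ # pslwr1mean..pslwr4mean in whatever order k-means happened to find them, while
+ # 'orden' fixes the canonical regime order. which(orden == clusterX.name) recovers
+ # the canonical slot of each stored centroid, and assign() builds the target
+ # variable name dynamically. Commented-out sketch with hypothetical values:
+ # orden <- c("NAO+", "NAO-", "Blocking", "Atl.Ridge")  # canonical order
+ # cluster1.name <- "Blocking"                          # name of the first stored centroid
+ # cluster1.ref <- which(orden == cluster1.name)        # -> 3
+ # assign(paste0("map.ref", cluster1.ref), pslwr1mean)  # creates map.ref3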
+
+ # set a second WR classification (e.g. with running clusters) to contrast with the new one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+
+ # load data of the reference season of the second classification (this line should be modified to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and Atlantic regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
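+
+ # Note on the Taylor diagrams drawn below: each point compares a monthly centroid
+ # against the seasonal reference centroid through three linked statistics: the
+ # pattern correlation R, the ratio of spatial standard deviations, and the centered
+ # RMS difference E', related by E'^2 = sd.ref^2 + sd.mod^2 - 2*sd.ref*sd.mod*R.
+ # Commented-out sketch on two flattened maps (numeric vectors of equal length):
+ # a  <- as.vector(map.ref1); b <- as.vector(map1)
+ # R  <- cor(a, b)               # pattern correlation
+ # sr <- sd(a); sm <- sd(b)      # spatial standard deviations
+ # E2 <- sr^2 + sm^2 - 2*sr*sm*R # squared centered RMS difference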
+
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map at the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ # Plot the Taylor diagrams:
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ # load data from the second classification:
+ load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1.old <- which(orden == cluster1.name)
+ cluster2.old <- which(orden == cluster2.name)
+ cluster3.old <- which(orden == cluster3.name)
+ cluster4.old <- which(orden == cluster4.name)
+
+ assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+ assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+ assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+ assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+ add.mod <- ifelse(p == 1, FALSE, TRUE)
+ main.mod <- ifelse(p == 1, orden[regime],"")
+
+ color.first <- "red"
+ color.second <- "blue"
+ sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work!
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+
+ # add the points from the second classification:
+ add.mod <- TRUE
+ if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+
+ if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) }
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # arrays storing the scores in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+ array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+ array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+ array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+ array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+ array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+ array.pers[p,1+l, 1] <- persist1 # NAO+
+ array.pers[p,1+l, 3] <- persist2 # NAO-
+ array.pers[p,1+l, 4] <- persist3 # Blocking
+ array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+ array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+ array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+ array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400)
+
+ # create an array similar to array.sp.cor but with colors instead of corr. values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # left triangle (NAO+)
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) # bottom triangle (Atl.Ridge)
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) # right triangle (NAO-)
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) # top triangle (Blocking)
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
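+
+ # Note on the figure above: each (startdate, lead time) cell is split into four
+ # triangles by polygon(), one per regime, following the third array index:
+ # left = NAO+, bottom = Atl.Ridge, right = NAO-, top = Blocking. Commented-out
+ # sketch of a single cell centered at (x, y) with unit width and height:
+ # x <- 1; y <- 0
+ # polygon(c(x-0.5, x-0.5, x), c(y-0.5, y+0.5, y), col="red")    # left (NAO+)
+ # polygon(c(x-0.5, x, x+0.5), c(y-0.5, y, y-0.5), col="gold")   # bottom (Atl.Ridge)
+ # polygon(c(x, x+0.5, x+0.5), c(y, y+0.5, y-0.5), col="blue")   # right (NAO-)
+ # polygon(c(x-0.5, x, x+0.5), c(y+0.5, y, y+0.5), col="green")  # top (Blocking)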
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}}
+
+ # Blocking:
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}}
+
+ # NAO- :
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}}
+
+ # Atlantic Ridge:
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}}
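+
+ # Note on the re-indexing above: in the 1-table layout the y axis is the forecast
+ # month i and the x axis shows lead time decreasing from 6 to 0, so column j
+ # corresponds to lead L = 6-j and the startdate is i - L, wrapped back into 1..12
+ # when it falls below 1. The ifelse() is equivalent to modular arithmetic;
+ # commented-out sketch:
+ # startdate <- function(fc.month, lead) ((fc.month - lead - 1) %% 12) + 1
+ # startdate(2, 6)  # forecast month Feb at lead 6 -> startdate 8 (August)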
+
+ # NAO- :
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}}
+
+ # Blocking:
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}}
+
+ # Atlantic Ridge:
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr. values:
+ corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+ array.cor.colors <- array(corr.cols[8],c(12,7,4))
+ array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+ array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+ array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+ array.cor.colors[array.cor < 0] <- corr.cols[4]
+ array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+ array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+ array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # left triangle (NAO+)
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) # bottom triangle (Atl.Ridge)
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) # right triangle (NAO-)
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) # top triangle (Blocking)
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}}
+
+ # Blocking:
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}}
+
+ # NAO- :
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}}
+
+ # Atlantic Ridge:
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}}
+
+ # NAO- :
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}}
+
+ # Blocking:
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}}
+
+ # Atlantic Ridge:
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+
+ dev.off()
+
+
+ # Freq. summary:
summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[8]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[7]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[6]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[5]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[4]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[3]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[2]] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j 
in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + 
par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + 
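+ # hedged note: the order of the seven "<" tests below matters, as each assignment
+ # overwrites the previous one for smaller values; the bin.colors() sketch near the
+ # "# Corr summary:" block above is a compact equivalent.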
array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- 
trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + 
axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text 
= "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) 
> 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300)
+ plot.new()
+ 
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
+ + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + ## #most influent WT with positive impact: + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + # most influent WT with negative impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + 
par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact"
+
+
+
+
diff --git a/old/weather_regimes_maps_v19.R~ b/old/weather_regimes_maps_v19.R~
new file mode 100644
index 0000000000000000000000000000000000000000..6b5608a6bf99029d6f40efe38a51e19427a86f99
--- /dev/null
+++ b/old/weather_regimes_maps_v19.R~
@@ -0,0 +1,3909 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/Downloads/scripts/weather_regimes_maps_v19.R (for example)
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: if your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below)
+# that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, they can find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, they have to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis
+# with the forecast system, which is used to automatically assign to each simulated cluster the observed regime
+# with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- rean.name #forecast.name + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 11 #1:12 # You need to have created beforehand the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period. + +# run it twice: once, with 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple"' +# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script +# a second time, one month/season at a time, with 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple"' +# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set +# 'ordering = TRUE' and 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order. + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "taylor" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.)
[You need all ClusterNames files] + # - 'impact' for all the impact plots of the regime with the highest impact + # - 'single.impact' to save the individual impact maps + # - 'single.psl' to save the individual psl maps + # - 'single.fre' to save the individual fre maps + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + +var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot + +# Associate the regimes to the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
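+  # A minimal sketch of the start-month arithmetic applied inside the loop below:
+  # for compositions 'psl' and 'fre' the period (start month) follows from the
+  # target forecast month and the lead time, wrapping around the year
+  # ('start.month.of' is a hypothetical helper, not used elsewhere in this script):
+  #start.month.of <- function(forecast.month, lead.month) ((forecast.month - lead.month - 1) %% 12) + 1
+  #start.month.of(1, 3)  # 10: an October start date forecasts January at lead month 3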
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name || (fields.name == forecast.name && lead.month == 0 || n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available; if not, it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + print("Impact data available") + impact.data <- TRUE + } else { + print("Impact data not available; 'simple' compositions will have an empty space in the middle column") + impact.data <- FALSE + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + if(impact.data == TRUE && (composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + 
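+      # The block above in a minimal, self-contained form (all toy numbers are
+      # hypothetical): yearly regime counts from a days x years x members array of
+      # cluster labels, then the interannual sd as a fraction of the month length:
+      #toy <- array(sample(1:4, 30*5*2, replace=TRUE), c(30,5,2))
+      #freq.toy <- apply(toy == 1, c(2,3), sum)  # days spent in regime 1, per year and member
+      #sd(freq.toy) / 30                         # analogous to sd.freq.sim1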
# compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster[[load.month]]$cluster)-1)){ + if (my.cluster[[load.month]]$cluster[d] == 1 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs1[j1o] <- my.cluster[[load.month]]$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 2 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs2[j2o] <- my.cluster[[load.month]]$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster[[load.month]]$cluster[d] == 3 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs3[j3o] <- my.cluster[[load.month]]$cluster[d + 1] + j3o <- j3o + 1 + } + if 
(my.cluster[[load.month]]$cluster[d] == 4 && (my.cluster[[load.month]]$cluster[d + 1] != my.cluster[[load.month]]$cluster[d])){ + transition.obs4[j4o] <- my.cluster[[load.month]]$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster[[load.month]]$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) 
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[load.month]][[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + 
if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + # insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + # forecast month: September + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"} + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + # forecast month: October + if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + # forecast month: November + if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed due to the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + 
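+      # The cluster-regime association above is a greedy matching: repeatedly take
+      # the global maximum of the correlation matrix and retire its row and column.
+      # A compact restatement (sketch only; 'cc' is a hypothetical 4x4 matrix with
+      # simulated clusters as rows and observed regimes as columns):
+      #greedy.match <- function(cc) {
+      #  assoc <- rep(NA, 4)
+      #  for(k in 1:4) {
+      #    m <- which(cc == max(cc), arr.ind=TRUE)[1,]
+      #    assoc[m[1]] <- m[2]              # cluster m[1] gets regime m[2]
+      #    cc[m[1],] <- -999; cc[,m[2]] <- -999
+      #  }
+      #  assoc
+      #}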
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the original lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close if on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + 
assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and 
Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: their pressure fields, their impact on var and their frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," 
(",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + 
par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # the composition == "impact" alternative can never trigger here, since we are already inside an if with composition == "simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, 
use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, 
contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 
0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Save the individual psl pattern maps for a selected forecast month: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecast month: + if(composition == "fre"){ + # Frequency plots: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, 
map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + 
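+
+ # The nu-bar labels above are plotmath calls built with bquote(): .( ) splices the
+ # evaluated value into the expression at construction time, so mtext() later renders
+ # a Greek nu with an overbar followed by the percentage. A minimal sketch, with
+ # freq.demo as a hypothetical frequency vector (not used elsewhere in this script):
+ freq.demo <- c(0.21, 0.25, 0.30)
+ lbl.demo <- bquote(bar(nu) == .(paste0(round(mean(100*freq.demo),1),"%"))) # "25.3%"
+ stopifnot(is.language(lbl.demo)) # still an unevaluated call until a text function draws it
+
+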
mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + print(paste("p =",p,"lead.month =", lead.month)) # spat.cor1,spat.cor2,spat.cor3,spat.cor4)) + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", 
cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + + print("Finished!") +} # close if on composition != "summary" + + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set a first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.name.ref <- cluster1.name + cluster2.name.ref <- cluster2.name + cluster3.name.ref <- cluster3.name + cluster4.name.ref <- cluster4.name + + cluster1.ref <- which(orden == cluster1.name.ref) + cluster2.ref <- which(orden == cluster2.name.ref) + cluster3.ref <- which(orden == cluster3.name.ref) + cluster4.ref <- which(orden == cluster4.name.ref) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + #cluster1.ref <- 3 + #cluster2.ref <- 4 + #cluster3.ref <- 1 + #cluster4.ref <- 2 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.name.ref <- cluster1.name + cluster2.name.ref <- cluster2.name + cluster3.name.ref <- cluster3.name + cluster4.name.ref <- cluster4.name + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- 
cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name.ref) + cluster2.ref <- which(orden == cluster2.name.ref) + cluster3.ref <- which(orden == cluster3.name.ref) + cluster4.ref <- which(orden == cluster4.name.ref) + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regimes: + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, 
sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, that with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != 
p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = 
color.second)
+    } # close for on 'p'
+
+    dev.off()
+
+  } # close for on 'regime'
+
+
+} # close if on composition == "taylor"
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+  # arrays storing the summary scores in the format: [startdate, leadmonth, regime]
+  array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+  array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+  array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4))
+
+  for(p in 1:12){
+    p.orig <- p
+
+    for(l in 0:6){
+
+      load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      array.sp.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+      array.sp.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+      array.sp.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+      array.sp.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+      array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+      array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+      array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+      array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+      array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+      array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+      array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+      array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+      array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+      array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+      array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+      array.pers[p,1+l, 1] <- persist1 # NAO+
+      array.pers[p,1+l, 3] <- persist2 # NAO-
+      array.pers[p,1+l, 4] <- persist3 # Blocking
+      array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+      array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+      array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+      array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+      array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+      array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+      array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+      array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+      array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+      array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+      array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+      array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+      array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
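+      # Each score above is spread over the third dimension with the fixed regime->slot
+      # mapping used by the triangle plots (slot 1 = NAO+ left, 2 = Atl.Ridge bottom,
+      # 3 = NAO- right, 4 = Blocking top). A minimal sketch of a helper applying that
+      # mapping in one call (fill.regime.slots is hypothetical and not used below):
+      fill.regime.slots <- function(arr, p, l, nao.pos, nao.neg, blocking, atl.ridge){
+        arr[p, 1+l, 1] <- nao.pos   # first triangle (at left)
+        arr[p, 1+l, 3] <- nao.neg   # third triangle (at right)
+        arr[p, 1+l, 4] <- blocking  # fourth triangle (top one)
+        arr[p, 1+l, 2] <- atl.ridge # second triangle (bottom one)
+        arr # R copies on modify, so the caller must reassign the result
+      }
+      # e.g. array.pers <- fill.regime.slots(array.pers, p, l, persist1, persist2, persist3, persist4)
+
+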
array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+      array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+      array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+      array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+      array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+      array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+      array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+      array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+      array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+      array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+      array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+      array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge
+
+      array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+      array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+      array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+      array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+      array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+      array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+      array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+      array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+      array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+      array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+      array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+      array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+      array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+      array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+      array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+      array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+      array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+      array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+      array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+      array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+      #array.rpss[p,1+l, 1] <- # NAO+
+      #array.rpss[p,1+l, 3] <- # NAO-
+      #array.rpss[p,1+l, 4] <- # Blocking
+      #array.rpss[p,1+l, 2] <- # Atl.Ridge
+    }
+  }
+
+  # Spatial Corr summary:
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr.png"), width=550, height=400)
+
+  # create an array similar to array.sp.cor but with colors instead of corr. values:
+  sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+  array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+  array.sp.cor.colors[array.sp.cor < my.seq[8]] <- sp.corr.cols[7]
+  array.sp.cor.colors[array.sp.cor < my.seq[7]] <- sp.corr.cols[6]
+  array.sp.cor.colors[array.sp.cor < my.seq[6]] <- sp.corr.cols[5]
+  array.sp.cor.colors[array.sp.cor < my.seq[5]] <- sp.corr.cols[4]
+
array.sp.cor.colors[array.sp.cor < my.seq[4]] <- sp.corr.cols[3] + array.sp.cor.colors[array.sp.cor < my.seq[3]] <- sp.corr.cols[2] + array.sp.cor.colors[array.sp.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", 
cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_sp_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+  for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+  mtext("NAO-", cex=1.5)
+  par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+  for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+  mtext("Atlantic Ridge", cex=1.5)
+  par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+  for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}}
+
+  par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+  ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300)
+
+  plot.new()
+  par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+  mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2)
+
+  par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+  mtext("NAO+", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+  mtext("NAO-", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+  mtext("Blocking", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+  mtext("Atlantic Ridge", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}}
+
+  par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+  ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+
+  dev.off()
+
+
+
+
+
+
+  # Freq. summary:
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400)
+
+  # create an array similar to array.freq but with colors instead of frequencies:
+  freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+  my.seq <- c(NA,20,22,24,26,28,30,32,NA)
+
+  array.freq.colors <- array(freq.cols[8],c(12,7,4))
+  array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7]
+  array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6]
+  array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5]
+  array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4]
+  array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3]
+  array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2]
+  array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Mean S4 frequency (%)", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+  for(p in 1:12){
+    for(l in 0:6){
+      polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+      polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2])
+      polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])
+      polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4])
+    }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600)
+
+  plot.new()
+  par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+  mtext(text="Mean S4 frequency (%)", cex=1.5)
+
+  # NAO+ :
+  par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
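+
+  # The threshold ladders above bin score values into colors with repeated "<" masks;
+  # base R can do the same lookup (same boundary convention) in one step with
+  # findInterval(). A minimal, self-contained sketch; bin.cols and vals.demo are
+  # hypothetical names, not used elsewhere in this script:
+  bin.cols <- function(vals, brks, cols) cols[findInterval(vals, brks, all.inside=TRUE)]
+  vals.demo <- c(19, 21, 33)
+  stopifnot(identical(bin.cols(vals.demo, c(-Inf,20,22,24,26,28,30,32,Inf), paste0("c",1:8)), c("c1","c2","c8")))
+
+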
mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. 
summary:
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+  # create an array similar to array.diff.freq but with colors instead of frequency differences:
+  diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+  my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+  array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+  array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+  array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+  array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+  array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+  array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+  array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+  array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Difference between S4 and ERA-Interim frequencies (%)", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+  for(p in 1:12){
+    for(l in 0:6){
+      polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+      polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+      polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+      polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+    }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+  plot.new()
+  par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+  mtext(text="Difference between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+  par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+  mtext("NAO+", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+  mtext("NAO-", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+  mtext("Blocking", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+  mtext("Atlantic Ridge", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Lead time", line = 2, cex=1)
+  mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+  axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+  axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+  for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}}
+
+  par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+  ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+  dev.off()
+
+
+
+
+
+
+  # Persistence summary:
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+  # create an array similar to array.pers but with colors instead of persistence values:
+  pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+  array.pers.colors <- array(pers.cols[8],c(12,7,4))
+  array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+  array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+
array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + 
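+
+  # The triangle plots above split each (startdate, lead) cell into four triangles,
+  # one per regime slot (1=left, 2=bottom, 3=right, 4=top). A minimal sketch of the
+  # shared geometry, for a cell centred at (p, l) with half-width 0.5; triangle.xy
+  # is a hypothetical helper, not used by the plotting code in this script:
+  triangle.xy <- function(p, l, slot){
+    if(slot == 1) return(list(x=c(p-0.5, p-0.5, p), y=c(l-0.5, l+0.5, l)))     # left
+    if(slot == 2) return(list(x=c(p-0.5, p, p+0.5), y=c(l-0.5, l, l-0.5)))     # bottom
+    if(slot == 3) return(list(x=c(p, p+0.5, p+0.5), y=c(l, l+0.5, l-0.5)))     # right
+    list(x=c(p-0.5, p, p+0.5), y=c(l+0.5, l, l+0.5))                           # top
+  }
+
+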
mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, 
height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + 
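# The cascade of "<" assignments below bins each ratio into one of the 8 colours;
+ # findInterval() gives the same binning in one call. A minimal sketch under the same
+ # break convention (hypothetical helper, not used by this script):
+ breaks2col <- function(x, brks, cols) {
+   idx <- findInterval(x, brks[2:length(cols)]) + 1  # bin upper bounds are brks[2..8]
+   array(cols[idx], dim(x))                          # values >= brks[8] keep cols[8]
+ }
+ # e.g. breaks2col(array.diff.sd.freq, my.seq, diff.sd.freq.cols) should reproduce
+ # array.diff.sd.freq.colors built below (NA cells aside).
+ 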
array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- 
trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + 
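# Judging by which colour slice each labelled panel draws throughout this section,
+ # the third dimension of the array.*.colors arrays (dim = 12 startdates x 7 lead
+ # times x 4 regimes) appears to be ordered 1=NAO+, 2=Atlantic Ridge, 3=NAO-,
+ # 4=Blocking, which is why the panels index slices 1, 3, 4 and 2 rather than 1:4.
+ 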
axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+ 
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+ 
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ 
+ dev.off()
+ 
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300)
+ 
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.2)
+ 
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+ 
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+ 
+ par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+ 
+ par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ 
+ dev.off()
+ 
+ 
+ # Transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400)
+ 
+ # create an array similar to array.trans.sim2 but with colors instead of transition probabilities:
+ trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text 
= "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) 
> 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + 
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(rean.dir,"/",rean.name,"_",var.name[var.num],"_most_influent_negative.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],"_psl.RData")) + load(paste0(rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
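+
+# NOTE: a minimal sketch (hypothetical helper, not in the original script). The 'EU'
+# index built just below selects a longitude window that crosses the Greenwich meridian
+# on a 0-360 grid, by gluing the eastern columns (0..lon.max) to the western ones
+# (337..360); as a function, with this script's hard-coded bounds as defaults:
+lon.window <- function(lon, east = 45, west = 337){
+  c(1:which(lon >= east)[1], which(lon >= west)[1]:length(lon)) # eastern segment first, as below
+}
+# e.g.: EU <- lon.window(lon) reproduces the hard-coded expression that follows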
+ + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + ## #most influent WT with positive impact: + ## imp.all <- imp1 + ## for(i in 1:dim(imp1)[1]){ + ## for(j in 1:dim(imp1)[2]){ + ## if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + ## if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + ## if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + ## if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + ## } + ## } + + # most influent WT with negative impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + 
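+
+# NOTE (hypothetical rewrite, not in the original script): the nested i/j loops above
+# select, at each grid point, the regime with the most negative impact; up to tie-breaking,
+# the same 1.5/2.5/3.5/4.5 codes can be obtained without loops by stacking the four fields:
+# imp.stack <- array(c(imp1, imp2, imp3, imp4), dim = c(dim(imp1), 4))
+# imp.all <- apply(imp.stack, c(1,2), which.min) + 0.5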
par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact" + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v2.R b/old/weather_regimes_maps_v2.R new file mode 100644 index 0000000000000000000000000000000000000000..32706dd4e767c024a6972e397c8fdc3f7352613b --- /dev/null +++ b/old/weather_regimes_maps_v2.R @@ -0,0 +1,324 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# Assumptions: this script must be run twice: once, with the option 'ordering = F' to create a .pdf or .png file that the user must open to visually find the order in which +# the four regimes are stored inside the four clusters. For example, he can find that the sequence of the images in the file (from top to bottom) corresponds to: +# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' from F to T and set the four variables 'clusterX.name' (X=1..4) +# to follow the same order found inside that file; after that, he can run this script a second time to get the maps in the right order, which by default is, +# from top to bottom: "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). +# During these first two runs, the variable 'save.names' must be set to TRUE, so an .RData file is written to store the order of the four regimes, in case +# in future a user needs to visualize these maps again. In this case, the user must set 'save.names = FALSE' and 'ordering = TRUE' to be able to visualize the maps +# in the correct order. +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name # Choose between loading reanalysis or forecasts +var.num <- 2 # Choose a variable for the impact maps 1: sfcWind 2: tas +period <- 1 # Choose a period to plot from 1 to 17. You need to have created the '_mapdata.RData' file with the output of weather_regimes.R beforehand + # for that period. (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year) +lead.month <- 1 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) + +# run it twice: once, with ordering <- FALSE to look only at the cartography and visually find the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
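+# NOTE (hypothetical shortcut, not in the original script): when ordering=TRUE, the four
+# lookups "clusterX <- which(orden == clusterX.name)" performed further down can be
+# collapsed into a single call:
+# pos <- match(c(cluster1.name, cluster2.name, cluster3.name, cluster4.name), orden)
+# where pos[k] is the row (1 = top) at which cluster k will be drawn.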
+as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file + +if(save.names){ + cluster3.name <- "NAO+" # the regime associated to this cluster + cluster2.name <- "NAO-" + cluster1.name <- "Blocking" + cluster4.name <- "Atl.Ridge" +} + +var.name <- c("sfcWind","tas") +if(fields.name == rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) +if(fields.name == forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && 
fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ # loop over the chosen period(s); NOTE: loop header completed here, the original line is truncated after 'for(p in' +if(save.names){ + cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c() + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + +} else { # in this case, we load the cluster names from the file already saved: + if(fields.name == rean.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name == forecast.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] +} + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, 
year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 
0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + +if(fields.name == rean.name){ + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + PlotEquiMap2(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=c("red","blue","gold","purple3"), intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) +} + + if(!as.pdf) dev.off() # for saving 4 png + +} # close 'p' on 'period' (closing brace added to match the loop header reconstructed above) + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + diff --git a/old/weather_regimes_maps_v2.R~ b/old/weather_regimes_maps_v2.R~ new file mode 100644 index 0000000000000000000000000000000000000000..10f52f9b2294851bf897bd9e2474f697f3cafd0b --- /dev/null +++ b/old/weather_regimes_maps_v2.R~ @@ -0,0 +1,322 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". 
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Assumptions: this script must be run twice: once, with the option 'ordering = F' to create a .pdf or .png file that the user must open to find visually the order by which
+# the four regimes are stored inside the four clusters. For example, he can find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' from F to T and set the four variables 'clusterX.name' (X=1..4)
+# to follow the same order found inside that file; after that, he can run this script a second time to get the maps in the right order, which by default is,
+# from top to bottom: "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden').
+# During these first two runs, the variable 'save.names' must be set to TRUE, so an .RData file is written to store the order of the four regimes, in case
+# a user needs to visualize these maps again in the future. In this case, the user must set 'save.names = FALSE' and 'ordering = TRUE' to be able to visualize the maps
+# in the correct order.
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis or forecasts
+var.num <- 1 # Choose a variable for the impact maps 1: sfcWind 2: tas
+p <- 1 # Choose a period from 1 to 17. You need to have previously created the '_mapdata.RData' file with the output of weather_regimes.R
+       # for that period. (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year)
+lead.month <- 1 # in case you selected forecasts, specify a lead time (0 = same month as the start month)
+
+# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters;
+# and the second time, to save the ordered cartography:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
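+# A compact restatement of the two-pass workflow described in the header above
+# (illustrative settings only; they do not replace the instructions there):
+#   pass 1 (discover the cluster order):  ordering <- FALSE; save.names <- TRUE
+#   pass 2 (plot in the desired order):   ordering <- TRUE;  save.names <- TRUE
+#   later runs (reuse the saved names):   ordering <- TRUE;  save.names <- FALSE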
+as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- T # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file + +if(save.names){ + cluster3.name <- "NAO+" + cluster2.name <- "NAO-" + cluster1.name <- "Blocking" + cluster4.name <- "Atl.Ridge" +} + +var.name <- c("sfcWind","tas") +if(fields.name == rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) +if(fields.name == forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && 
fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +if(save.names){ + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + if(fields.name == rean.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name == forecast.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + imp.all <- imp1 + for(i in dim(imp1)[1]){ + for(j in dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] 
<- 3 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4 + } + } + + PlotEquiMap2(imp.all + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": 
Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + diff --git a/old/weather_regimes_maps_v20.R b/old/weather_regimes_maps_v20.R new file mode 100644 index 0000000000000000000000000000000000000000..74885e494f85aaa9aa11f752092a50238f5a9f9b --- /dev/null +++ b/old/weather_regimes_maps_v20.R @@ -0,0 +1,4058 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen cliamte variable (i.e: wind speed or temperature) +# +# I/O: input file needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". 
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below)
+# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can use the reanalysis
+# to automatically associate each simulated cluster with one of the
+# observed regimes: the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'):
+fields.name <- forecast.name #rean.name #forecast.name
+
+# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:1 # You need to have previously created the '_psl.RData' file with the output of 'weather_regimes_vXX'.R for that period.
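+# A minimal sketch of the cluster-regime association described in the header above:
+# greedily pick the (simulated cluster, observed regime) pair with the highest spatial
+# correlation, then exclude that row and column and repeat. 'greedy.match' is a
+# hypothetical helper shown only as illustration; it is not called anywhere in this
+# script, which instead inlines the same logic with explicit max1..max4 steps below.
+greedy.match <- function(corr.mat){
+  assignment <- rep(NA, nrow(corr.mat)) # assignment[sim cluster] = index of obs regime
+  for(k in 1:nrow(corr.mat)){
+    best <- which(corr.mat == max(corr.mat), arr.ind=TRUE)[1,]
+    assignment[best[1]] <- best[2]
+    corr.mat[best[1],] <- -Inf # this simulated cluster is now taken
+    corr.mat[,best[2]] <- -Inf # this observed regime is now taken
+  }
+  assignment
+}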
+
+# run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple"'
+# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script
+# a second time, one month/season at a time, with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple"'
+# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set:
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "impact" # choose which kind of composition you want to plot:
+                        # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                        # - 'psl' for all the regime anomalies for a fixed forecast month
+                        # - 'fre' for all the interannual frequencies for a fixed forecast month
+                        # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+                        # - 'summary' for the summary plots of all forecast months (persistence bias, freq. bias, correlations, etc.) [You need all ClusterNames files]
+                        # - 'impact.highest' for all the impact plots of the regime with the highest impact
+                        # - 'single.impact' to save the individual impact maps
+                        # - 'single.psl' to save the individual psl maps
+                        # - 'single.fre' to save the individual fre maps
+                        # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+                        # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+
+var.num <- 2 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps 1: sfcWind 2: tas
+
+mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot
+
+# Associate the regimes to the four clusters:
+cluster3.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster1.name <- "Blocking"
+cluster4.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+                   # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it
will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! 
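+# Worked example of the start-month arithmetic used in the loop below for the
+# 'psl'/'fre'/'impact' compositions: period = forecast.month - lead.month, wrapped
+# into 1..12. With forecast.month = 1 (January) and lead.month = 2, period = -1,
+# which wraps to -1 + 12 = 11: the 2-month-lead forecast of January is the one
+# initialized in November.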
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    impact.data <- FALSE
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+        # load impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(fields.name == rean.name){
+          if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+            impact.data <- TRUE
+            print("Impact data available for reanalysis")
+            load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+          } else {
+            impact.data <- FALSE
+            print("Impact data for reanalysis not available")
+          }
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
+        if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for forecasts")
+          if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps
+        } else {
+          impact.data <- FALSE
+          print("Impact data for forecasts not available")
+        }
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+        # compute the simulated interannual monthly variability:
+        n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
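+        # The apply() calls below count, for each year and ensemble member, how many
+        # days of the month fall in each cluster: dim 1 of my.cluster.array[[p]] is the
+        # day, and c(2,3) keeps dims 2 (year) and 3 (member); dividing the sd of these
+        # counts by n.days.month converts them into interannual frequency variability.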
freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ...
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+        n.dd <- length(my.cluster.vector)
+
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        pers1 <- mean(sequ1[which(sequ1 != 0)])
+        pers2 <- mean(sequ2[which(sequ2 != 0)])
+        pers3 <- mean(sequ3[which(sequ3 != 0)])
+        pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+        # compute correlations between simulated clusters and observed regime anomalies:
+        cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+        lat.forecast <- lat; lon.forecast <- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # also load the reanalysis regime names
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+        if(p.orig != p) p <- p.orig
+
+        # compute the observed transition probabilities:
+        n.days.month <- n.days.in.a.period(load.month,2001)
+        j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+        for (d in 1:(length(my.cluster$cluster)-1)){
+          if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+            j1o <- j1o + 1
+          }
+          if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+            j2o <- j2o + 1
+          }
+          if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+            j3o <- j3o + 1
+          }
+          if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+            j4o <- j4o + 1
+          }
+        }
+
+        trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+        trans.obs[1,2] <- length(which(transition.obs1 == 2))
+        trans.obs[1,3] <- length(which(transition.obs1 == 3))
+        trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+        trans.obs[2,1] <- length(which(transition.obs2 == 1))
+        trans.obs[2,3] <- length(which(transition.obs2 == 3))
+        trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+        trans.obs[3,1] <- length(which(transition.obs3 == 1))
+        trans.obs[3,2] <- length(which(transition.obs3 == 2))
+        trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+        trans.obs[4,1] <- length(which(transition.obs4 == 1))
+        trans.obs[4,2] <- length(which(transition.obs4 == 2))
+        trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+        trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+        for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        for(y in year.start:year.end){
+          # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc,
+          freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+        }
+
+        sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+        sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+        sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+        sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis'")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+        cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+        cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+        cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+        cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+        cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+        cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+        cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+        cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+        cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+        cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+        cluster.corr[3,3] <- cor(as.vector(cluster3.sim),
as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 
<- mean(sequ2[which(sequ2 != 0)])
+        persObs3 <- mean(sequ3[which(sequ3 != 0)])
+        persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+        ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+        ### forecast month: September
+        #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+        #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+        if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+        if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+        if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+        if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+        if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+        # regimes.sim # for the debug
+
+        ### forecast month: October
+        #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+        #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+        if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+        if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+
+        ### forecast month: November
+        #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+        regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction
+        #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference
+
+        order.sim <- match(regimes.sim2, orden)
+        order.obs <- match(regimes.obs, orden)
+
+        clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+        clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+        # recompute the spatial correlations, because they might have changed because of the manual corrections above:
+        spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+        spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+        spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+        spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+        #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig, period)) period <- period.orig
+        if(p.orig != p) p <- p.orig
+        if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+      } # close for on 'forecast.name'
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+        if(save.names){
+          save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+        } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+          load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+          if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+          if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        }
+      }
+
+      # assign to each cluster its regime:
+      if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+        cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+      } else {
+        cluster1 <- which(orden == cluster1.name)
+        cluster2 <- which(orden == cluster2.name)
+        cluster3 <- which(orden == cluster3.name)
+        cluster4 <- which(orden == cluster4.name)
+      }
+
+      assign(paste0("map",cluster1), pslwr1mean)
+      assign(paste0("map",cluster2), pslwr2mean)
+      assign(paste0("map",cluster3), pslwr3mean)
+      assign(paste0("map",cluster4), pslwr4mean)
+
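+      # (assign() reorders the clusters by regime: e.g. if cluster1.name is "NAO-",
+      # cluster1 == which(orden == "NAO-") == 2, so pslwr1mean is stored as map2 and,
+      # below, wr1y as fre2, keeping maps, frequencies and impacts aligned with 'orden'.)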
assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of 
long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- 
+
+ freq.max=100
+ if(fields.name == forecast.name){
+ pos.years.present <- match(year.start:year.end, years)
+ years.missing <- which(is.na(pos.years.present))
+ fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c()
+ k <- 0
+ for(y in year.start:year.end) {
+ if(y %in% (years.missing + year.start - 1)) {
+ fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA)
+ freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA)
+ freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA)
+ k=k+1
+ } else {
+ fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k])
+ fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k])
+ freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k])
+ freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k])
+ freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k])
+ freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k])
+ }
+ }
+
+ sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
+ sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2)
+ sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+ sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+
+ } else {
+ fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4
+ }
+
+
+ # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series:
+ if(composition == "simple"){
+ if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700)
+ if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""}
+ mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2)
+
+ # Centroid maps:
+ map.xpos <- 0
+ map.width <- 0.46
+ par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ ## # for the debug:
+ ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+ ## mes <- p
+ ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+ ## #plot(1981:2015,fre.obs,type="l")
+ ## #lines(1981:2015,fre1,type="l",col="red")
+ ## #lines(1981:2015,fre2,type="l",col="blue")
+ ## #lines(1981:2015,fre4,type="l",col="green")
+ ## cor1 <- cor(fre.obs, fre1)
+ ## cor2 <- cor(fre.obs, fre2)
+ ## cor3 <- cor(fre.obs, fre3)
+ ## cor4 <- cor(fre.obs, fre4)
+ ## round(c(cor1,cor2,cor3,cor4),2)
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.30
+ legend1.cex <- 2
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Subtitle centroid maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.02
+ title1.width <- 0.4
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+ mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+ mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+ mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+ mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+ # Impact maps:
+ if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # note: the composition == "impact" test is redundant here, because this code is already inside the composition == "simple" branch
+ impact.xpos <- 0.47
+ impact.width <- 0.26
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, 
dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, 
subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ }
+
+ if(!as.pdf) dev.off() # to save the PNG (when not plotting to a single PDF)
+
+ } # close if on: composition == 'simple'
+
+
+ # Visualize the composition with the regime anomalies for a selected forecasted month:
+ if(composition == "psl"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'psl'
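+
+ # Note on rescale(): this helper from Rfunctions.R is applied to every field before
+ # PlotEquiMap2 above and below; presumably it clips the values to the legend limits,
+ # so anomalies outside [my.brks[1], tail(my.brks,1)] saturate at the extreme colors
+ # instead of falling outside the palette. A commented sketch of such a clip (an
+ # assumption about its behavior, not the actual implementation):
+ ## rescale.sketch <- function(x, x.min, x.max) pmin(pmax(x, x.min), x.max)
+ ## rescale.sketch(c(-500, -10, 0, 15, 999), -300, 300) # -> -300 -10 0 15 300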
+
+ # Visualize the composition with the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Impact maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'impact'
+
+ # Save the impact map of each regime as a single image for a selected forecasted month:
+ if(composition == "single.impact"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+ }
+
+ # Save the regime anomaly pattern of each cluster as a single image for a selected forecasted month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2)))
+ dev.off()
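+
+ # The WithinSS value printed in the titles above and below comes from the k-means
+ # classification (see the commented-out withinss lines near the top of this section,
+ # where it is normalized by cluster size). A commented toy example of where these
+ # quantities live in a kmeans fit (illustration only):
+ ## km <- kmeans(matrix(rnorm(200), ncol=2), centers=4)
+ ## km$withinss # within-cluster sum of squares, one value per cluster
+ ## km$withinss / km$size # normalized by the number of members, as in the commented-out assigns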
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2)))
+ dev.off()
+
+ # Legend centroid maps:
+ #my.subset <- match(values.to.plot, my.brks)
+ #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ #mtext(side=4," hPa", cex=legend1.cex)
+
+ }
+
+ # Visualize the composition with the interannual frequencies for a selected forecasted month:
+ if(composition == "fre"){
+ # Frequency plots:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+ if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", 
my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + + print("Finished!") +} # close if on composition != "summary" + + + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + 
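# Aside: the Taylor diagrams drawn further below (via my.taylor, apparently a wrapper
+ # around plotrix::taylor.diagram) summarize each monthly pattern against the seasonal
+ # reference loaded just below through two statistics. A commented sketch of both
+ # (illustration only; the vectors here are synthetic):
+ ## ref <- rnorm(500) # reference pattern (e.g. the seasonal centroid)
+ ## mod <- ref + rnorm(500, sd=0.4) # pattern compared against it (e.g. a monthly centroid)
+ ## cor(ref, mod) # pattern correlation (angular coordinate of the diagram)
+ ## sd(mod) / sd(ref) # normalized standard deviation (radial coordinate)
+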
load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! + cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regimes: + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, 
lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, that with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + # Plot the Taylor diagrams: + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + 
assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { corr.first[2,p] <- 
cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 
100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+ array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+ array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+ array.pers[p,1+l, 1] <- persist1 # NAO+
+ array.pers[p,1+l, 3] <- persist2 # NAO-
+ array.pers[p,1+l, 4] <- persist3 # Blocking
+ array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+ array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+ array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+ array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ # commented out: array.imp is never initialized above and cor.imp1-cor.imp4 are not
+ # among the objects saved to the RData files loaded here, so this block would fail
+ # (kept for reference, like the array.rpss block below):
+ #array.imp[p,1+l, 1] <- cor.imp1 # NAO+
+ #array.imp[p,1+l, 3] <- cor.imp2 # NAO-
+ #array.imp[p,1+l, 4] <- cor.imp3 # Blocking
+ #array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
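+
+ # transSim1-transSim4 (and transObs1-transObs4 below) are the rows of 4x4 regime
+ # transition matrices, reordered to the sequence in 'orden'. A commented toy example
+ # of how such a matrix can be estimated from a daily regime series (illustration
+ # only, not the original computation):
+ ## wt <- c(1,1,2,3,3,3,4,1,2,2) # toy daily sequence: 1=NAO+, 2=NAO-, 3=Blocking, 4=Atl.Ridge
+ ## trans.counts <- table(factor(head(wt,-1), levels=1:4), factor(tail(wt,-1), levels=1:4))
+ ## trans.prob <- trans.counts / pmax(rowSums(trans.counts), 1) # row i = transitions out of regime i
+ ## round(trans.prob, 2)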
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr.png"), width=550, height=400)
+
+ # create an array similar to array.spat.cor but with colors instead of correlation values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ 
i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), 
my.labels=seq(-1,1,0.25))
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ # breaks must match the binning thresholds used for array.cor.colors above:
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
"Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. 
summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # Delta freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequency differences:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[8]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[7]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[6]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[5]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[4]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[3]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[2]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j
in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), 
ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = 
my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + 
array.imp.colors[array.imp < my.seq[7]] <- imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.trans.sim1 but with colors instead of transition probabilities:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
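+ # (sketch) Every panel of the *_4tables figures repeats the same
+ # plot/mtext/axis/polygon block; a helper along these lines could draw one
+ # panel per call. 'draw.panel' is an illustrative name, not part of the
+ # original script, and assumes my.month.short is in scope:
+ draw.panel <- function(colors.slice) {
+   plot(1, 1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=FALSE)
+   mtext(side=1, text="Startdate", line=4, cex=1.5)
+   mtext(side=2, text="Lead time", line=2.5, cex=1.5)
+   axis(1, at=1:12, las=2, labels=my.month.short, cex.axis=1.5)
+   axis(2, at=0:6, las=2, labels=0:6, cex.axis=1.5)
+   for(p in 1:12) for(l in 0:6)  # one rectangle per (startdate, lead time) cell
+     polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5),
+             col=colors.slice[p, 1+l])
+ }
+ # e.g. draw.panel(array.trans.sim1.colors[,,4]) would reproduce the Blocking
+ # panel drawn just below.
+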
par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 
1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + # NOTE: same "_1table" filename as the 4-column figure above; the 3-column layout below overwrites it (the other regimes' "_1table" figures are all 3-column) + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 
0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), 
las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, 
text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), 
fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), 
col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + 
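+ # Each run of eight "< my.seq[k]" assignments above (and, for the bias arrays,
+ # below) bins values into the trans.cols classes; a minimal equivalent sketch
+ # using findInterval() (the helper name wt.bin.colors is hypothetical):
+ wt.bin.colors <- function(x, brks, cols) {
+   # (-Inf, brks[2]) -> cols[1], [brks[2], brks[3]) -> cols[2], ..., [brks[8], Inf) -> cols[8]
+   out <- cols[findInterval(x, brks[2:(length(brks)-1)]) + 1]
+   out[is.na(x)] <- "white"  # NA cells stay white, as in the explicit assignments
+   array(out, dim = dim(x))
+ }
+ # e.g. array.trans.sim4.colors <- wt.bin.colors(array.trans.sim4, my.seq, trans.cols)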
png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + 
array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, 
-0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + 
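+ # In these "_1table" loops i is the forecast (verified) month and j the column:
+ # i2 recovers the startdate month at lead time 6-j, wrapping around the year.
+ # The ifelse() correction is equivalent to modular arithmetic (sketch; the
+ # name wrap.month is hypothetical):
+ wrap.month <- function(i, lead) ((i - lead - 1) %% 12) + 1
+ # e.g. wrap.month(2, 6) == 8: February at 6 months lead was started in August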
par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + 
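+ # The four polygon() calls above split each cell into left/bottom/right/top
+ # triangles, one per target regime; a minimal sketch of a reusable cell drawer
+ # (the name draw.wt.cell is hypothetical):
+ draw.wt.cell <- function(p, l, cols4) {  # cols4: left, bottom, right, top colours
+   x <- p - 0.5; y <- l  # the cell spans [x, x+1] x [y-0.5, y+0.5]
+   polygon(c(x, x, x+0.5),     c(y-0.5, y+0.5, y), col=cols4[1])  # left
+   polygon(c(x, x+0.5, x+1),   c(y-0.5, y, y-0.5), col=cols4[2])  # bottom
+   polygon(c(x+0.5, x+1, x+1), c(y, y+0.5, y-0.5), col=cols4[3])  # right
+   polygon(c(x, x+0.5, x+1),   c(y+0.5, y, y+0.5), col=cols4[4])  # top
+ }
+ # e.g. for(p in 1:12) for(l in 0:6) draw.wt.cell(p, l, array.trans.diff3.colors[p,1+l,])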
par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", 
cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- 
trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2,
cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # chose an index: 1: most influent, 2: most influent positively, 3: most influent negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + 
#col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WR: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WR with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WR with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title 
map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + diff --git a/old/weather_regimes_maps_v20.R~ b/old/weather_regimes_maps_v20.R~ new file mode 100644 index 0000000000000000000000000000000000000000..6d1479c9d8296d9381369f0b4e70b7b52d63be52 --- /dev/null +++ b/old/weather_regimes_maps_v20.R~ @@ -0,0 +1,3957 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below) +# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, the user can find that the +# sequence of the images in the file (from top to bottom) corresponds to the Blocking / NAO- / Atl.Ridge / NAO+ regimes. Subsequently, the user has to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
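+#
+# A compact sketch of that greedy association (illustrative only, not part of the script;
+# here 'cc' stands for the 4x4 matrix of spatial correlations between simulated clusters
+# (rows) and observed regimes (columns), computed further below as 'cluster.corr'):
+#
+#   assoc <- rep(NA, 4)
+#   for(k in 1:4){
+#     m <- which(cc == max(cc), arr.ind = TRUE)[1, ]  # best remaining cluster/regime pair
+#     assoc[m[1]] <- m[2]                             # cluster m[1] gets regime m[2]
+#     cc[m[1], ] <- -Inf; cc[, m[2]] <- -Inf          # exclude its row and column
+#   }
+#
+# The max1..max4 / cluster.corr2..cluster.corr4 code below implements the same search,
+# using -999 as the exclusion value.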
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- rean.name #forecast.name #rean.name #forecast.name + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 13:16 # You need to have created before the '_psl.RData' file with the output of 'weather_regimes_vXX'.R for that period. + +# run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" ' +# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script +# a second time one month/season at time with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" ' +# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set: +# 'ordering = TRUE , 'save.names = FALSE' (and any type of composition you want to plot) not to overwrite the files which stores the right cluster order. + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "impact" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) 
[You need all ClusterNames files] + # - 'impact.highest' for all the impact plots of the regime with the highest impact + # - 'single.impact' to save the individual impact maps + # - 'single.psl' to save the individual psl map + # - 'single.fre' to save the individual fre map + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + +var.num <- 2 # if fields.name ='rean.name' and composition ='simple', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot + +# Associate the regimes to the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 10 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading the _psl.RData files can change this value, so it is backed up here and restored after each load()
+fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") +n.map <- 0 + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! + + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + impact.data <- FALSE + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month') + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available, if not it will leave an empty space in the composition: + if(fields.name == rean.name){ + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for reanalysis") + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print("Impact data for reanalysis not available") + } + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + 
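+      # The transition counts and mean persistences computed with nested loops further
+      # below can also be obtained with table() and rle(); an inactive, illustrative
+      # sketch for a single year/member series of daily clusters:
+      if(FALSE){
+        cl <- my.cluster.array[[p]][, 1, 1]                       # days of year 1, member 1
+        steps <- cbind(head(cl, -1), tail(cl, -1))                # consecutive-day pairs
+        steps <- steps[steps[, 1] != steps[, 2], , drop = FALSE]  # keep only regime changes
+        trans.counts <- table(factor(steps[, 1], levels = 1:4), factor(steps[, 2], levels = 1:4))
+        trans.prob <- prop.table(trans.counts, margin = 1)        # row-normalized, as below
+        r <- rle(cl)                                              # regime runs and their lengths
+        pers <- sapply(1:4, function(k) mean(r$lengths[r$values == k]))
+      }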
if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecast month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- 
length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs2[j2o] <- my.cluster$cluster[d + 1] + j2o
<- j2o + 1 + } + if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs3[j3o] <- my.cluster$cluster[d + 1] + j3o <- j3o + 1 + } + if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs4[j4o] <- my.cluster$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + 
cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + 
if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignment due to misassignment in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month 
== 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed due to the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close if on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + 
cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + 
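+      # Note on the reordering used just below: for a permutation x of 1:4, the vector
+      # c(which(x == 1), which(x == 2), which(x == 3), which(x == 4)) built above as
+      # 'ordered.sequence' / 'ordered.sequence.obs' is the inverse permutation of x,
+      # i.e. order(x); it maps the columns of the transition matrices from cluster
+      # numbering to the regime order given in 'orden'. Equivalent one-liner (sketch):
+      if(FALSE){
+        ordered.sequence.obs <- order(clusters.all.obs)
+      }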
assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- 
c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, 
fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # it won't enter here never because we are already inside an if con composition="simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, 
cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
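+ # (On the comment above: plotmath treats its argument as an unevaluated call, so paste0() is not
+ # executed inside expression(); bquote() substitutes evaluated values through .( ), which is the
+ # workaround used for the mean-frequency labels further below. A minimal standalone sketch, with a
+ # hypothetical value v:
+ #   v <- 23.4
+ #   plot.new()
+ #   mtext(bquote(bar(nu) == .(paste0(round(v, 1), "%"))))  # renders as: bar(nu) = 23.4%
+ # )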
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, 
subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # close the device when saving PNGs (one file per month/season) + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition with the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Impact maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, 
intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Save each regime's impact map as a separate PNG: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Save each regime's psl anomaly pattern as a separate PNG: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + 
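+ # (Each single map above and below follows the same pattern: open a png device, draw with
+ # PlotEquiMap2() while forcing anomalies into the palette range via rescale(), then close with
+ # dev.off(). rescale() is a helper defined elsewhere in this codebase; it is assumed here to clamp
+ # values into [lo, hi], i.e. roughly equivalent to this hypothetical stand-in:
+ #   rescale.sketch <- function(x, lo, hi) pmin(pmax(x, lo), hi)
+ #   rescale.sketch(c(-500, 0, 500), my.brks[1], tail(my.brks, 1))  # clamped to the colorbar range
+ # )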
PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Frequency barplots: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", 
my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + + print("Finished!") +} # close if on composition != "summary" + + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (Winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + 
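+ # (Note on the load() calls below: load() restores every object saved in the .RData file into the
+ # current environment, which can silently overwrite this script's own variables -- hence the
+ # var.num/fields.name restores that follow each load. A safer pattern, sketched here with a
+ # hypothetical file name, is to load into a private environment:
+ #   e <- new.env(); load("regimes.RData", envir = e); e$pslwr1mean
+ # )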
load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! + cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regime: + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, 
lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + # Plot the Taylor diagrams: + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + 
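+ # (The which()/assign() pairs above and below reorder the four saved centroids so that map1..map4
+ # always follow the fixed order of the 'orden' vector, whatever the cluster numbering in the file.
+ # An equivalent list-based sketch, assuming pslwr1mean..pslwr4mean are the loaded centroid fields:
+ #   maps <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+ #   names(maps) <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+ #   maps <- maps[orden]  # maps[[1]] is now always the first regime listed in 'orden'
+ # )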
assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { corr.first[2,p] <- 
cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # arrays storing the verification scores in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 
100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- 
NA + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + # Spatial Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr.png"), width=550, height=400) + + # create an array similar to array.spat.cor but with colors instead of correlation values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7] + array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6] + array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5] + array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4] + array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3] + array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2] + array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ 
i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), 
my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) # use the correlation breaks here, not my.seq from the spatial-correlation plots + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = 
"Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. 
+ # Freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400)
+
+ # create an array similar to array.freq but with colors instead of frequencies:
+ freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(NA,20,22,24,26,28,30,32,NA)
+
+ array.freq.colors <- array(freq.cols[8],c(12,7,4))
+ array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7]
+ array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6]
+ array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5]
+ array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4]
+ array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3]
+ array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2]
+ array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Mean S4 frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean S4 frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean S4 frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Delta freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequency differences:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
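+ # Note: the blocks of successive "<" assignments above and below bin the values into
+ # the 8-colour palettes by hand; a minimal sketch of an equivalent helper based on
+ # findInterval() (hypothetical, not used in this script) would be:
+ #   bin.colors <- function(x, brks, cols) {
+ #     b <- brks[is.finite(brks)]                                  # drop NA end markers
+ #     if(length(b) == length(cols) + 1) b <- b[-c(1, length(b))]  # keep inner breaks only
+ #     array(cols[findInterval(x, b) + 1], dim(x))                 # NAs in x stay NA
+ #   }
+ #   array.diff.freq.colors <- bin.colors(array.diff.freq, my.seq, diff.freq.cols)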
+
+
+ # Persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+ # create an array similar to array.pers but with colors instead of persistence values:
+ pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+ array.pers.colors <- array(pers.cols[8],c(12,7,4))
+ array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+ array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+ array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+ array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+ array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+ array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+ array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("S4 Regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.pers.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.pers.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Delta persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400)
+
+ # create an array similar to array.diff.pers but with colors instead of persistence differences:
+ diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA)
+
+ array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4))
+ array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7]
+ array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6]
+ array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5]
+ array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4]
+ array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3]
+ array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2]
+ array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.pers.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
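+ # Both "diff" scales (delta frequency and delta persistence) are centred on 0, so,
+ # assuming the arrays hold S4 minus ERA-Interim, reddish cells mark months where S4
+ # overestimates and bluish cells months where it underestimates the reanalysis.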
+
+
+ # St.Dev.freq ratio summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.sd.freq but with colors instead of ratio values:
+ diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+
+ array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
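+ # A ratio of 1 above means the spread (st. dev.) of the simulated average frequencies
+ # matches the observed one; with this palette, values above 1 (reddish) flag regimes
+ # whose frequency is more variable in S4 than in ERA-Interim, values below 1 (bluish)
+ # less variable.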
+
+
+ # Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.trans.sim1 but with colors instead of transition probabilities:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
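+ # Note: the block below writes _summary_trans_NAO+_1table.png a second time, in a
+ # narrower 3-panel layout without the NAO+ to NAO+ panel, overwriting the 4-panel
+ # version produced just above.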
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text 
= "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + 
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- 
trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic 
Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + 
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) 
> 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 
0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + 
par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- 
array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! 
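+ # A vectorized sketch of the "most influential" selection coded with nested loops below
+ # (a hypothetical alternative: it assumes imp1..imp4 are numeric matrices with identical
+ # dimensions, and ties at a grid point may be resolved differently than by the loops):
+ # imp.stack <- array(c(imp1, imp2, imp3, imp4), dim = c(dim(imp1), 4))
+ # imp.all <- apply(abs(imp.stack), c(1, 2), which.max) + 0.5  # gives 1.5/2.5/3.5/4.5 as below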
+ + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WR: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WR with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WR with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, 
draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + diff --git a/old/weather_regimes_maps_v21.R b/old/weather_regimes_maps_v21.R new file mode 100644 index 0000000000000000000000000000000000000000..bdc53e5f493622a61485fcccc59409219ae63c04 --- /dev/null +++ b/old/weather_regimes_maps_v21.R @@ -0,0 +1,4075 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below) +# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, to employ it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! 
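+# A minimal sketch of that correlation-based association ('sim.anom' and 'obs.anom' are hypothetical
+# arrays of regime anomalies with dimensions c(4, n.lat, n.lon), both on the same grid):
+#
+# match.regimes <- function(sim.anom, obs.anom) {
+#   sapply(1:4, function(k) {
+#     which.max(sapply(1:4, function(r) cor(as.vector(sim.anom[k,,]), as.vector(obs.anom[r,,]))))
+#   })
+# }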
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'):
+fields.name <- forecast.name #rean.name #forecast.name
+
+# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12 # You must have created the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period beforehand.
+
+# Run it twice: first with 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple"',
+# to look at the cartography and visually find the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script
+# a second time, one month/season at a time, with 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple"',
+# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set
+# 'ordering = TRUE' and 'save.names = FALSE' (with any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e.: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "impact" # choose which kind of composition you want to plot:
+                        # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                        # - 'psl' for all the regime anomalies for a fixed forecast month
+                        # - 'fre' for all the interannual frequencies for a fixed forecast month
+                        # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+                        # - 'summary' for the summary plots of all forecast months (persistence bias, freq. bias, correlations, etc.)
+                        #   [You need all ClusterNames files]
+                        # - 'impact.highest' for all the impact plots of the regime with the highest impact
+                        # - 'single.impact' to save the individual impact maps
+                        # - 'single.psl' to save the individual psl maps
+                        # - 'single.fre' to save the individual fre maps
+                        # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+                        # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+
+var.num <- 2 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps. 1: sfcWind 2: tas
+
+mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot
+
+# Associate the regimes to the four clusters:
+cluster3.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster1.name <- "Blocking"
+cluster4.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+                   # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
+#for(forecast.month in 1:12){
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
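+# A minimal sketch (illustration only, never executed) of how the 'orden' vector drives
+# the plotting order used further below: each cluster's regime name is looked up in
+# 'orden' to get its display position, and assign() then stores the cluster's field
+# under the name for that position. All *.demo objects are hypothetical.
+if(FALSE){
+  orden.demo <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+  names.demo <- c("Blocking","NAO-","NAO+","Atl.Ridge")  # regime names of clusters 1..4
+  pos.demo <- match(names.demo, orden.demo)              # 3 2 1 4: display row of each cluster
+  for(k in 1:4) assign(paste0("map.demo", pos.demo[k]), paste("field of cluster", k))
+  # map.demo1 now holds cluster 3 ("NAO+"), map.demo2 holds cluster 2 ("NAO-"), etc.
+}
+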
+var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+work.dir.orig <- work.dir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+n.map <- 0
+
+if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){
+
+  if(composition == "psl" || composition == "fre" || composition == "impact") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0)
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2)
+
+  }
+
+  if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    impact.data <- FALSE
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+        # load impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(fields.name == rean.name){
+          if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+            impact.data <- TRUE
+            print("Impact data available for reanalysis")
+            load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+          } else {
+            impact.data <- FALSE
+            print("Impact data for reanalysis not available")
+          }
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
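+        # Note (sketch, illustration only, never executed): load() dumps every object
+        # stored in the .RData file straight into the calling environment, which is why
+        # var.num, fields.name and work.dir must be restored from their *.orig copies
+        # after each call. A safer pattern is to load into a private environment and
+        # copy out only the objects needed; the file name below is hypothetical.
+        if(FALSE){
+          tmp.env <- new.env()
+          load("example_psl.RData", envir = tmp.env)       # hypothetical file
+          print(ls(tmp.env))                               # inspect which objects the file provides
+          pslwr1mean <- get("pslwr1mean", envir = tmp.env) # copy out a single object
+        }
+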
if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- 
length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... , day1month2member1, day2month2member1, ,,, + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrities the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + 
+        n.days.month <- n.days.in.a.period(load.month,2001)
+        j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+        for (d in 1:(length(my.cluster$cluster)-1)){
+          if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+            j1o <- j1o + 1
+          }
+          if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+            j2o <- j2o + 1
+          }
+          if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+            j3o <- j3o + 1
+          }
+          if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+            j4o <- j4o + 1
+          }
+
+        }
+
+        trans.obs <- matrix(NA,4,4, dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+        trans.obs[1,2] <- length(which(transition.obs1 == 2))
+        trans.obs[1,3] <- length(which(transition.obs1 == 3))
+        trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+        trans.obs[2,1] <- length(which(transition.obs2 == 1))
+        trans.obs[2,3] <- length(which(transition.obs2 == 3))
+        trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+        trans.obs[3,1] <- length(which(transition.obs3 == 1))
+        trans.obs[3,2] <- length(which(transition.obs3 == 2))
+        trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+        trans.obs[4,1] <- length(which(transition.obs4 == 1))
+        trans.obs[4,2] <- length(which(transition.obs4 == 2))
+        trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+        trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+        for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        for(y in year.start:year.end){
+          # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+          freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+        }
+
+        sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+        sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+        sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+        sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis' ones")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4),
dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + 
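+        # The initialisation above and the loop below measure the mean length of the
+        # consecutive runs of each cluster. As a compact cross-check (sketch,
+        # illustration only, never executed), run-length encoding gives the same
+        # mean persistence per cluster, e.g. for cluster 1:
+        if(FALSE){
+          runs <- rle(my.cluster.vector)                      # length and cluster id of each run
+          pers1.check <- mean(runs$lengths[runs$values == 1]) # should match persObs1 computed below
+        }
+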
+ n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; 
cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) 
fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- 
c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + 
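+      # brewer.pal() used below comes from the RColorBrewer package (not attached
+      # explicitly at the top of this script, so it is presumably loaded as a
+      # dependency of one of the attached packages; attaching it directly would be
+      # safer). Sketch (illustration only, never executed) of how the palette is
+      # matched to the breaks: n break points delimit n-1 intervals, so n-1 colours
+      # are interpolated from the reversed 11-class RdBu diverging scale:
+      if(FALSE){
+        library(RColorBrewer)
+        brks.demo <- seq(-3, 3, 0.5)  # 13 break points -> 12 intervals
+        cols.demo <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(brks.demo)-1) # 12 colours, blue to red
+        stopifnot(length(cols.demo) == length(brks.demo) - 1)
+      }
+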
#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + 
PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 
0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # it won't enter here never because we are already inside an if con composition="simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) 
barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, 
use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + 
map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.psl"){ + 
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, 
map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height 
+ 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + dev.off() + } # close if on composition + + print("Finished!") +} # close if on composition != "summary" + + + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + 
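+ # A hedged sketch (helper name assumed, not in the original script): the
+ # hard-coded "Winter" file names below could be derived from season.ref, since
+ # my.period[13:16] holds the season names:
+ season.files <- function(dir, season.name) {
+   # returns the two .RData files loaded below for a given season name
+   file.path(dir, paste0("ERA-Interim_", season.name, c("_psl.RData", "_ClusterNames.RData")))
+ }
+ # e.g.: for(f in season.files(rean.dir, my.period[season.ref])) load(f, envir=.GlobalEnv)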
#load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! + cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon,
lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map to the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ # Plot the Taylor diagrams:
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
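+ # RMSE() is provided elsewhere in this project; a minimal sketch consistent
+ # with its use on the centroid maps below (root-mean-square error between two
+ # vectors) would be:
+ RMSE.sketch <- function(a, b) sqrt(mean((a - b)^2, na.rm = TRUE)) # sketch only
+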
assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { corr.first[2,p] <- 
cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # arrays storing the verification measures in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array.imp <- array(NA,c(12,7,4))
+ array.spat.cor <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+ array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+ array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+ array.diff.freq[p,1+l, 3] <-
100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+ array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+ array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+ array.pers[p,1+l, 1] <- persist1 # NAO+
+ array.pers[p,1+l, 3] <- persist2 # NAO-
+ array.pers[p,1+l, 4] <- persist3 # Blocking
+ array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+ array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+ array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+ array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ array.imp[p,1+l, 1] <- cor.imp1 # NAO+
+ array.imp[p,1+l, 3] <- cor.imp2 # NAO-
+ array.imp[p,1+l, 4] <- cor.imp3 # Blocking
+ array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
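+ # The third array index is the triangle position inside each startdate/lead-time
+ # cell of the summary mosaics drawn further below; a named lookup (hypothetical,
+ # for clarity only) of the convention used in the comments above would be:
+ tri.slot <- c("NAO+" = 1, "Atl.Ridge" = 2, "NAO-" = 3, "Blocking" = 4) # 1=left, 2=bottom, 3=right, 4=top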
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr.png"), width=550, height=400)
+
+ # create an array similar to array.spat.cor but with colors instead of corr. values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ 
i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), 
my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = 
"Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. 
summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+
+ # Delta freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ # (The triangle-mosaic and "_4tables" figures for the frequency difference, and
+ # the header of its "_1table" figure, were garbled in this copy; they follow the
+ # same template as the "Mean S4 frequency (%)" block above.)
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j
in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), 
ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = 
my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + 
array.imp.colors[array.imp < my.seq[7]] <- imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.imp.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", 
line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + 
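+
+ # Sketch (hypothetical helper, not part of the original script): the index
+ # i2 = ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) that recurs in the *_1table
+ # figures converts a forecasted month i, seen at lead time 6-j, back to its
+ # startdate month, wrapping across the year boundary; modular arithmetic
+ # gives the same mapping in one expression:
+ start.month <- function(i, lead) ((i - lead - 1) %% 12) + 1
+ # e.g. a February forecast at 6-month lead started in August:
+ stopifnot(start.month(2, 6) == 8, start.month(8, 6) == 2, start.month(5, 0) == 5)
+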
par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 
1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 
0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), 
las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, 
text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), 
fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), 
las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), 
col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + 
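+  # Sketch of a more compact colour binning (not used above; 'bin2col' is a hypothetical
+  # helper, assuming the same my.seq breaks and trans.cols palette): the ladders of
+  # "< my.seq[k]" assignments that build the array.trans.*.colors arrays are equivalent
+  # to a single findInterval() lookup per array:
+  # bin2col <- function(x, brks, cols) {
+  #   out <- array(cols[findInterval(x, brks, all.inside=TRUE)], dim=dim(x))
+  #   out[is.na(x)] <- "white"   # keep missing transitions white, as above
+  #   out
+  # }
+  # array.trans.sim4.colors <- bin2col(array.trans.sim4, my.seq, trans.cols)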
png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + 
array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, 
-0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + 
par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + 
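+  # note: the double loop above splits each startdate x lead-time cell into four
+  # triangles (left, bottom, right, top), coloured with slices 1-4 of the colour array,
+  # so a single cell shows the transition bias towards all four regimes at once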
par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", 
cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- 
trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2,
cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # chose an index: 1: most influent, 2: most influent positively, 3: most influent negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + 
#col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max <- 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in the future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WR: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WR with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WR with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title 
map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + diff --git a/old/weather_regimes_maps_v21.R~ b/old/weather_regimes_maps_v21.R~ new file mode 100644 index 0000000000000000000000000000000000000000..e78494067975371ac0dd142d9a78c3a40d14fd51 --- /dev/null +++ b/old/weather_regimes_maps_v21.R~ @@ -0,0 +1,4077 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below) +# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, they can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, they have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! 
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- forecast.name #rean.name #forecast.name + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 1:12 # You need to have created beforehand the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period. + +# run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" ' +# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script +# a second time, one month/season at a time, with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" ' +# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot) so as not to overwrite the files which store the right cluster order. + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "impact" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) 
[You need all ClusterNames files] + # - 'impact.highest' for the impact plots of the regime with the highest impact + # - 'single.impact' to save the individual impact maps + # - 'single.psl' to save the individual psl map + # - 'single.fre' to save the individual fre map + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + +var.num <- 2 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot + +# Associate the regimes to the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a closing '}' at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # keep a copy: loading the _psl files can change this value! 
+fields.name.orig <- fields.name
+period.orig <- period
+work.dir.orig <- work.dir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+n.map <- 0
+
+if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){
+
+  if(composition == "psl" || composition == "fre" || composition == "impact") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0)
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2)
+
+  }
+
+  if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    impact.data <- FALSE
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig                  # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig              # restore the original values of this script
+
+        # load impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(fields.name == rean.name){
+          if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+            impact.data <- TRUE
+            print("Impact data available for reanalysis")
+            load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+          } else {
+            impact.data <- FALSE
+            print("Impact data for reanalysis not available")
+          }
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
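+        # (Editor's note) load() restores every object stored in the .RData file into the current
+        # environment, silently overwriting variables such as 'var.num', 'fields.name' or 'work.dir'
+        # if they were saved together with the regime data; hence the blocks that reset them right
+        # after each load(). A minimal sketch of the hazard (hypothetical file name):
+        if(FALSE){
+          var.num <- 2
+          save(var.num, file="demo.RData")
+          var.num <- 1
+          load("demo.RData")
+          print(var.num) # 2: the saved value has overwritten the current one
+        }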
if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1; transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != my.cluster.array[[p]][d,y,m])){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- 
length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... , day1month2member1, day2month2member1, ,,, + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrities the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + 
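+ # (Editor's sketch, assuming a single uninterrupted daily cluster series x.) The transition
+ # counts computed below, like the simulated ones above, could equivalently be obtained with
+ # base R's table() on consecutive day pairs, keeping only the days on which the regime changes:
+ if(FALSE){
+   x <- c(1,1,2,3,3,3,1,4,4,2)                           # hypothetical daily cluster sequence
+   pairs <- cbind(head(x,-1), tail(x,-1))                # (day d, day d+1) pairs
+   pairs <- pairs[pairs[,1] != pairs[,2], , drop=FALSE]  # keep only regime changes
+   trans.demo <- table(factor(pairs[,1], levels=1:4), factor(pairs[,2], levels=1:4))
+   trans.demo <- trans.demo / pmax(rowSums(trans.demo), 1)  # row-normalize to probabilities
+   print(round(trans.demo, 2))
+ }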
n.days.month <- n.days.in.a.period(load.month,2001)
+      j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+      for (d in 1:(length(my.cluster$cluster)-1)){
+        if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+          j1o <- j1o + 1
+        }
+        if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+          j2o <- j2o + 1
+        }
+        if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+          j3o <- j3o + 1
+        }
+        if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+          j4o <- j4o + 1
+        }
+
+      }
+
+      trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+      trans.obs[1,2] <- length(which(transition.obs1 == 2))
+      trans.obs[1,3] <- length(which(transition.obs1 == 3))
+      trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+      trans.obs[2,1] <- length(which(transition.obs2 == 1))
+      trans.obs[2,3] <- length(which(transition.obs2 == 3))
+      trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+      trans.obs[3,1] <- length(which(transition.obs3 == 1))
+      trans.obs[3,2] <- length(which(transition.obs3 == 2))
+      trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+      trans.obs[4,1] <- length(which(transition.obs4 == 1))
+      trans.obs[4,2] <- length(which(transition.obs4 == 2))
+      trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+      trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+      for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+      # compute the observed interannual monthly variability:
+      freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+      for(y in year.start:year.end){
+        # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+        freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+        freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+        freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+        freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+      }
+
+      sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+      sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+      sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+      sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+      wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+      regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+      if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+      if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+      if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+      if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+      cluster.corr <- array(NA, c(4,4),
dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + 
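+ # (Editor's note) This run-length bookkeeping, here and in the loop below, computes each
+ # regime's mean spell length over the concatenated daily series; for a given vector, base R's
+ # rle() gives the same quantity directly, e.g.:
+ if(FALSE){
+   x <- c(1,1,2,3,3,3,1,4,4,2)  # hypothetical daily cluster sequence
+   r <- rle(x)
+   print(sapply(1:4, function(k) mean(r$lengths[r$values == k])))  # mean persistence per regime
+ }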
+ n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; 
cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) 
fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + if(fields.name == rean.name){ + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, 
cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var 
#c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), 
new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + 
title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE && (composition == "simple" || composition == "impact")){ # it won't enter here never because we are already inside an if con composition="simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == 
rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, 
use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + 
map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.psl"){ + 
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, 
map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height 
+ 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE)
+ mtext(paste0("r= ",sp.cor4), cex=3.5)
+ }
+ } # close if on composition == 'fre'
+
+ # save indicators for each startdate and forecast time:
+ # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector)
+ if(fields.name == forecast.name) {
+ if (composition != "impact") {
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ } else { # also save the impX and cor.impX objects
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, cor.imp1, cor.imp2, cor.imp3, cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ }
+ }
+
+ } # close 'p' on 'period'
+
+ if(as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+ } # close 'lead.month' on 'lead.months'
+
+ if(composition == "psl" || composition == "fre"){
+ # Regime names:
+ title1.xpos <- 0.01
+ title1.width <- 0.04
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+ mtext(paste0(regime1.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+ mtext(paste0(regime2.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE)
+ mtext(paste0(regime3.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE)
+ mtext(paste0(regime4.name), font=2, cex=5)
+
+ if(composition == "psl") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:21, my.labels=my.brks)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ mtext("hPa", cex=legend1.cex*1.3)
+ }
+
+ dev.off()
+ } # close if on composition
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+
+
+if(composition == "taylor"){
+ library("plotrix")
+
+ # choose a reference season from 13 (Winter) to 16 (Autumn):
+ season.ref <- 13
+
+ # set the first WR classification:
+ rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+ # load data of the reference season of the first classification (these lines should be modified to load the chosen season):
#load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! + cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading the regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
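+
+ # rescale() comes from Rfunctions.R and is used above (and below) before every
+ # PlotEquiMap2() call, presumably to keep the field inside the colorbar range
+ # defined by my.brks. A minimal sketch of such a clamp (an assumption about
+ # its behaviour, not necessarily the actual implementation):
+ #
+ #   rescale.sketch <- function(x, lo, hi) pmin(pmax(x, lo), hi)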
+
+ # draw the last map at the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ # Plot the Taylor diagrams:
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading the regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
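+
+ # The assign(paste0("map",k), ...) calls above reorder the four regime maps so
+ # that map1..map4 always follow the 'orden' vector. An equivalent, more
+ # idiomatic sketch with a list instead of dynamically named variables (not
+ # used in this script):
+ #
+ #   maps <- vector("list", 4)
+ #   maps[[cluster1]] <- pslwr1mean; maps[[cluster2]] <- pslwr2mean
+ #   maps[[cluster3]] <- pslwr3mean; maps[[cluster4]] <- pslwr4mean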
assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { corr.first[2,p] <- 
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # arrays storing the verification indicators in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) # array.imp needs input files saved with composition == "impact" (they include the cor.impX objects)
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading the regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+ array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+ array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+ array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+ array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+ array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+ array.pers[p,1+l, 1] <- persist1 # NAO+
+ array.pers[p,1+l, 3] <- persist2 # NAO-
+ array.pers[p,1+l, 4] <- persist3 # Blocking
+ array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+ array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+ array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+ array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ array.imp[p,1+l, 1] <- cor.imp1 # NAO+
+ array.imp[p,1+l, 3] <- cor.imp2 # NAO-
+ array.imp[p,1+l, 4] <- cor.imp3 # Blocking
+ array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
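+
+ # Each (startdate, lead time) cell of the summary plots below is split into
+ # four triangles: slot [,,1] is drawn as the left triangle (NAO+), [,,2] as
+ # the bottom one (Atl. Ridge), [,,3] as the right one (NAO-) and [,,4] as the
+ # top one (Blocking), matching the polygon() calls below. A hypothetical
+ # helper summarizing this slot convention (not used in this script):
+ #
+ #   fill.cell <- function(arr, p, l, nao.pos, atl.ridge, nao.neg, blocking){
+ #     arr[p, 1+l, 1] <- nao.pos; arr[p, 1+l, 2] <- atl.ridge
+ #     arr[p, 1+l, 3] <- nao.neg; arr[p, 1+l, 4] <- blocking
+ #     arr
+ #   }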
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr.png"), width=550, height=400)
+
+ # create an array similar to array.spat.cor but with colors instead of correlation values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+
mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ 
i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of correlation values:
+ corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- seq(-1,1,0.25) # breaks of the correlation colorbar (also used by the 4tables and 1table colorbars below)
+
+ array.cor.colors <- array(corr.cols[8],c(12,7,4))
+ array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+ array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+ array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+ array.cor.colors[array.cor < 0] <- corr.cols[4]
+ array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+ array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+ array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+ dev.off()
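+
+ # In the "1table" layouts of this section, the y axis is the forecast month i
+ # and the x position j corresponds to lead time 6-j (the x labels run 6:0),
+ # so the startdate index is i2 = i-(6-j), wrapped into 1..12. Worked example:
+ # for forecast month i=2 (Feb) at position j=4 (lead time 2), i2 = 2-2 = 0,
+ # which wraps to 12: the December startdate forecasts February at lead 2.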
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text =
"Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. 
summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400)
+
+ # create an array similar to array.freq but with colors instead of frequency values:
+ freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(NA,20,22,24,26,28,30,32,NA)
+
+ array.freq.colors <- array(freq.cols[8],c(12,7,4))
+ array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7]
+ array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6]
+ array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5]
+ array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4]
+ array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3]
+ array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2]
+ array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Mean S4 frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean S4 frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n",
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Delta freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequency differences:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecasted month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
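+
+ # The ladders of "< my.seq[k]" assignments used throughout this section are a
+ # manual binning; with finite breaks like the my.seq above, an equivalent
+ # sketch (not used here, and leaving NA cells aside) would be:
+ #
+ #   bin <- findInterval(array.diff.freq, my.seq[-c(1, length(my.seq))]) + 1
+ #   array.diff.freq.colors[] <- diff.freq.cols[bin]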
+
+
+ # Persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+ # create an array similar to array.pers but with colors instead of persistence values:
+ pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+ array.pers.colors <- array(pers.cols[8],c(12,7,4))
+ array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+ array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+ array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+ array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+ array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+ array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+ array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("S4 Regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2,
labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), 
+
+ # Delta persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400)
+
+ # create an array similar to array.diff.pers but with colors instead of persistence differences:
+ diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA)
+
+ array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4))
+ array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7]
+ array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6]
+ array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5]
+ array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4]
+ array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3]
+ array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2]
+ array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1,0.5+p-1,1+p-1), c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # left triangle: NAO+
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) # bottom triangle: Atlantic Ridge
+     polygon(c(1+p-1,1.5+p-1,1.5+p-1), c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) # right triangle: NAO-
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) # top triangle: Blocking
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
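+
+ # Refactoring sketch (hypothetical helper, not used elsewhere in this script):
+ # the eight threshold assignments above, and the identical blocks in the
+ # summaries below, all perform the same binning, which findInterval() can
+ # express once; NAs are drawn in white, as in the transition blocks further down.
+ bin.colors <- function(x, brks, cols) {
+   # values in [brks[k], brks[k+1]) are mapped to cols[k]; the outermost
+   # breaks (possibly NA) are dropped so the first and last bins are open-ended
+   out <- array(cols[findInterval(x, brks[-c(1, length(brks))]) + 1], dim = dim(x))
+   out[is.na(x)] <- "white"
+   out
+ }
+ # e.g.: array.diff.pers.colors <- bin.colors(array.diff.pers, my.seq, diff.pers.cols)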
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
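+
+ # Refactoring sketch: each *_4tables figure (above and below) draws the same
+ # startdate-by-lead panel four times; a hypothetical helper like this one
+ # (not used elsewhere in the script; it relies on my.month.short being
+ # defined) could draw one panel given the color array, the regime layer
+ # (1=NAO+, 2=Atlantic Ridge, 3=NAO-, 4=Blocking) and the two fig regions:
+ plot.lead.panel <- function(col.array, layer, regime, fig.title, fig.panel) {
+   par(mar=c(0,0,4,0), fig=fig.title, new=TRUE)
+   mtext(regime, cex=1.5)
+   par(mar=c(0,4,1,0), fig=fig.panel, new=TRUE)
+   plot(1, 1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+   mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+   mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+   axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+   axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+   # one colored cell per startdate p and lead time l
+   for(p in 1:12) for(l in 0:6) polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=col.array[p, 1+l, layer])
+ }
+ # e.g.: plot.lead.panel(array.diff.pers.colors, 1, "NAO+", c(0, 0.45, 0.85, 0.98), c(0, 0.45, 0.6, 0.9))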
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
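+
+ # Companion sketch to plot.lead.panel() above: the *_1table figures transpose
+ # the layout (forecast month on the y axis), recovering the startdate index
+ # with the i2 mapping noted earlier. Hypothetical helper, not used below:
+ plot.month.panel <- function(col.array, layer, regime, fig.title, fig.panel) {
+   par(mar=c(0,0,4,0), fig=fig.title, new=TRUE)
+   mtext(regime, cex=1.2)
+   par(mar=c(1,4,1,0), fig=fig.panel, new=TRUE)
+   plot(1, 1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+   mtext(side = 1, text = "Lead time", line = 2, cex=1)
+   mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+   axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+   axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+   # column j holds lead time 6-j; i2 is the startdate verifying in month i
+   for(i in 1:12) for(j in 0:6) { i2 <- ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12); polygon(c(j-0.5, j+0.5, j+0.5, j-0.5), c(i-0.5, i-0.5, i+0.5, i+0.5), col=col.array[i2, 1+6-j, layer]) }
+ }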
+
+ # St.Dev. freq ratio summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.sd.freq but with colors instead of ratio values:
+ diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+
+ array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # left triangle: NAO+
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) # bottom triangle: Atlantic Ridge
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) # right triangle: NAO-
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) # top triangle: Blocking
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + 
+ array.imp.colors[array.imp < my.seq[7]] <- imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1,0.5+p-1,1+p-1), c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # left triangle: NAO+
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) # bottom triangle: Atlantic Ridge
+     polygon(c(1+p-1,1.5+p-1,1.5+p-1), c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) # right triangle: NAO-
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) # top triangle: Blocking
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.trans.sim1 but with colors instead of transition percentages:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ # which() keeps the assignments NA-safe (a logical index containing NAs would fail):
+ array.trans.sim1.colors[which(array.trans.sim1 < my.seq[8])] <- trans.cols[7]
+ array.trans.sim1.colors[which(array.trans.sim1 < my.seq[7])] <- trans.cols[6]
+ array.trans.sim1.colors[which(array.trans.sim1 < my.seq[6])] <- trans.cols[5]
+ array.trans.sim1.colors[which(array.trans.sim1 < my.seq[5])] <- trans.cols[4]
+ array.trans.sim1.colors[which(array.trans.sim1 < my.seq[4])] <- trans.cols[3]
+ array.trans.sim1.colors[which(array.trans.sim1 < my.seq[3])] <- trans.cols[2]
+ array.trans.sim1.colors[which(array.trans.sim1 < my.seq[2])] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # left triangle: NAO+
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) # bottom triangle: Atlantic Ridge
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) # right triangle: NAO-
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) # top triangle: Blocking
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
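+
+ # Note: the same palette and 0-100% breaks defined above are redefined before
+ # each of the remaining transition summaries below; a single shared definition
+ # would keep the four blocks in sync if the scale ever changes, e.g.
+ # (hypothetical names):
+ # trans.brks <- c(0, 10, 20, 30, 40, 50, 60, 70, 100)
+ # trans.palette <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))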
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
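+
+ # Note: the next png() call reuses the *_summary_trans_NAO+_1table.png name,
+ # so this 600x300 three-panel version overwrites the 800x300 four-panel file
+ # written just above; giving one of the two a distinct suffix (hypothetical,
+ # e.g. "_summary_trans_NAO+_1table_4panels.png") would preserve both.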
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.trans.sim2 but with colors instead of transition percentages:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+ # which() keeps the assignments NA-safe (a logical index containing NAs would fail):
+ array.trans.sim2.colors[which(array.trans.sim2 < my.seq[8])] <- trans.cols[7]
+ array.trans.sim2.colors[which(array.trans.sim2 < my.seq[7])] <- trans.cols[6]
+ array.trans.sim2.colors[which(array.trans.sim2 < my.seq[6])] <- trans.cols[5]
+ array.trans.sim2.colors[which(array.trans.sim2 < my.seq[5])] <- trans.cols[4]
+ array.trans.sim2.colors[which(array.trans.sim2 < my.seq[4])] <- trans.cols[3]
+ array.trans.sim2.colors[which(array.trans.sim2 < my.seq[3])] <- trans.cols[2]
+ array.trans.sim2.colors[which(array.trans.sim2 < my.seq[2])] <- trans.cols[1]
+ array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # left triangle: NAO+
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) # bottom triangle: Atlantic Ridge
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) # right triangle: NAO-
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) # top triangle: Blocking
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), 
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Forecast month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400)
+
+ # create an array similar to array.trans.sim4 but with colors instead of transition percentages:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4))
+ # which() keeps the assignments NA-safe (a logical index containing NAs would fail):
+ array.trans.sim4.colors[which(array.trans.sim4 < my.seq[8])] <- trans.cols[7]
+ array.trans.sim4.colors[which(array.trans.sim4 < my.seq[7])] <- trans.cols[6]
+ array.trans.sim4.colors[which(array.trans.sim4 < my.seq[6])] <- trans.cols[5]
+ array.trans.sim4.colors[which(array.trans.sim4 < my.seq[5])] <- trans.cols[4]
+ array.trans.sim4.colors[which(array.trans.sim4 < my.seq[4])] <- trans.cols[3]
+ array.trans.sim4.colors[which(array.trans.sim4 < my.seq[3])] <- trans.cols[2]
+ array.trans.sim4.colors[which(array.trans.sim4 < my.seq[2])] <- trans.cols[1]
+ array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from Atlantic Ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # left triangle: NAO+
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) # bottom triangle: Atlantic Ridge
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) # right triangle: NAO-
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) # top triangle: Blocking
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + 
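+ # the threshold cascade above assigns colours from the widest bin downwards, so each cell ends
+ # up with the colour of the interval its bias value falls in; cells with NA bias values are
+ # painted white by the next assignment: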
array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, 
-0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + 
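+ # in these '1table' layouts the y axis is the forecasted month, so i2 converts row i and
+ # column j back to the corresponding startdate i-(6-j), wrapping around the year when the
+ # difference falls below January: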
par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + 
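+ # the loop above splits each (startdate, lead time) cell into four triangles (left, bottom,
+ # right and top), one per destination regime, coloured from slices 1-4 of the colour array;
+ # the vertical colour bar drawn next gives the shared bias scale: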
par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", 
cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- 
trans.cols[6]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from Atlantic Ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2,
cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecast month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Forecasted month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.very.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # chose an index: 1: most influent, 2: most influent positively, 3: most influent negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + 
+ #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+ col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+   load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+   load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+   # positions of the longitude values over Europe only (without the Atlantic Ocean and America):
+   #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+   lon.max = 45
+   EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+   cluster1 <- which(orden == cluster1.name.period[p]) # in the future it will be == cluster1.name
+   cluster2 <- which(orden == cluster2.name.period[p])
+   cluster3 <- which(orden == cluster3.name.period[p])
+   cluster4 <- which(orden == cluster4.name.period[p])
+
+   assign(paste0("imp",cluster1), varPeriodAnom1mean)
+   assign(paste0("imp",cluster2), varPeriodAnom2mean)
+   assign(paste0("imp",cluster3), varPeriodAnom3mean)
+   assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+   # most influential WT:
+   if (index == 1) {
+     imp.all <- imp1
+     for(i in 1:dim(imp1)[1]){
+       for(j in 1:dim(imp1)[2]){
+         if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+         if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+         if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+         if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+       }
+     }
+   }
+
+   # most influential WT with positive impact:
+   if (index == 2) {
+     imp.all <- imp1
+     for(i in 1:dim(imp1)[1]){
+       for(j in 1:dim(imp1)[2]){
+         if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+         if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+         if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+         if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+       }
+     }
+
+   }
+
+   # most influential WT with negative impact:
+   if (index == 3) {
+     imp.all <- imp1
+     for(i in 1:dim(imp1)[1]){
+       for(j in 1:dim(imp1)[2]){
+         if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+         if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+         if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+         if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+       }
+     }
+   }
+
+   if(p<=3) yMod <- 0.7
+   if(p>=4 && p<=6) yMod <- 0.5
+   if(p>=7 && p<=9) yMod <- 0.3
+   if(p>=10) yMod <- 0.1
+   if(p==13 || p==14) yMod <- 0.52
+   if(p==15 || p==16) yMod <- 0.1
+
+   if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+   if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+   if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+   if(p==13 || p==15) xMod <- 0.05
+   if(p==14 || p==16) xMod <- 0.50
+
+   # impact map:
+   if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+   if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+   PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+   # title map:
+   if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+   if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+   mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact.highest"
+
diff --git a/old/weather_regimes_maps_v22.R b/old/weather_regimes_maps_v22.R
new file mode 100644
index 0000000000000000000000000000000000000000..c1f8b7f70ceca50bd0f5376be7c2154efc26bff7
--- /dev/null
+++ b/old/weather_regimes_maps_v22.R
@@ -0,0 +1,4202 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below)
+# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, one can find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, one has to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can use the reanalysis
+# to automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'):
+fields.name <- forecast.name
+
+# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12 # You need to have created the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period beforehand.
+
+# run it twice: once, with 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" '
+# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script
+# a second time one month/season at a time with 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" '
+# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot) so as not to overwrite the files which store the right cluster order.
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "summary" # choose which kind of composition you want to plot:
+                         # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                         # - 'psl' for all the regime anomalies for a fixed forecast month
+                         # - 'fre' for all the interannual frequencies for a fixed forecast month
+                         # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+                         # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files]
+                         # - 'impact.highest' for all the impact plots of the regime with the highest impact
+                         # - 'single.impact' to save the individual impact maps
+                         # - 'single.psl' to save the individual psl map
+                         # - 'single.fre' to save the individual fre map
+                         # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+                         # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+
+var.num <- 1 # if fields.name ='rean.name' and composition ='simple', choose a variable for the impact maps 1: sfcWind 2: tas
+
+mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot
+
+# Associate the regimes to the four clusters:
+cluster3.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster1.name <- "Blocking"
+cluster4.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+                   # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a } at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
+for(forecast.month in 1:12){
+####### Derived variables ###############################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+var.num.orig <- var.num # when loading the _psl files, this value can change!
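+# the load() calls further below overwrite some workspace variables (var.num, fields.name,
+# work.dir, period) with the values stored inside the .RData files, so their current values
+# are saved here and restored after each load: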
+fields.name.orig <- fields.name
+period.orig <- period
+work.dir.orig <- work.dir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+n.map <- 0
+###################################################################################################################################################################
+
+if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){
+
+  if(composition == "psl" || composition == "fre" || composition == "impact") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0)
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2)
+
+  }
+
+  if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    impact.data <- FALSE
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+        # load impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for reanalysis")
+          load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+        } else {
+          impact.data <- FALSE
+          print("Impact data for reanalysis not available")
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
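+        # forecast branch: load the simulated clusters for start month p and lead month 'lead.month';
+        # the blocks below derive interannual frequencies, transition probabilities and mean
+        # persistence from my.cluster.array[[p]] (days x years x members)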
load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- 
+ # compute the transition probabilities:
+ j1 <- j2 <- j3 <- j4 <- 1
+ transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+ n.members <- dim(my.cluster.array[[p]])[3]
+
+ for(m in 1:n.members){
+ for(y in 1:n.years){
+ for (d in 1:(n.days.month-1)){
+
+ if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){
+ transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+ j1 <- j1 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){
+ transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+ j2 <- j2 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){
+ transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+ j3 <- j3 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){
+ transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+ j4 <- j4 + 1
+ }
+
+ }
+ }
+ }
+
+ trans.sim <- matrix(NA, 4, 4, dimnames=list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+ trans.sim[1,2] <- length(which(transition1 == 2))
+ trans.sim[1,3] <- length(which(transition1 == 3))
+ trans.sim[1,4] <- length(which(transition1 == 4))
+
+ trans.sim[2,1] <- length(which(transition2 == 1))
+ trans.sim[2,3] <- length(which(transition2 == 3))
+ trans.sim[2,4] <- length(which(transition2 == 4))
+
+ trans.sim[3,1] <- length(which(transition3 == 1))
+ trans.sim[3,2] <- length(which(transition3 == 2))
+ trans.sim[3,4] <- length(which(transition3 == 4))
+
+ trans.sim[4,1] <- length(which(transition4 == 1))
+ trans.sim[4,2] <- length(which(transition4 == 2))
+ trans.sim[4,3] <- length(which(transition4 == 3))
+
+ trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+ for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
+
+ # compute cluster persistence:
+ my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+
+ sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+ i1 <- i2 <- i3 <- i4 <- 1
+
+ if(my.cluster.vector[1] != 1) i1 <- 0
+ if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[1] != 2) i2 <- 0
+ if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[1] != 3) i3 <- 0
+ if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[1] != 4) i4 <- 0
+ if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+ n.dd <- length(my.cluster.vector)
+
+ for(d in 1:(n.dd-1)){
+ if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+ if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+ if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+ if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+ if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+ if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+ if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+ if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+ }
+
+ if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ pers1 <- mean(sequ1[which(sequ1 != 0)])
+ pers2 <- mean(sequ2[which(sequ2 != 0)])
+ pers3 <- mean(sequ3[which(sequ3 != 0)])
+ pers4 <- mean(sequ4[which(sequ4 != 0)])
+
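+ ## Cross-check (sketch): the mean persistences above can also be obtained with rle(),
+ ## which encodes my.cluster.vector as runs of identical regimes:
+ ## runs <- rle(my.cluster.vector)
+ ## mean(runs$lengths[runs$values == 1]) # should match pers1, and so on for pers2..pers4
+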
+ # compute correlations between simulated clusters and observed regimes' anomalies:
+ cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+
+ if(composition == 'impact' && impact.data == TRUE) {
+ cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean
+ #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4
+ }
+
+ lat.forecast <- lat; lon.forecast <- lon
+
+ if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names
+
+ # load reanalysis varwrXmean impact data:
+ if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig,period)) period <- period.orig
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ # compute the observed transition probabilities:
+ n.days.month <- n.days.in.a.period(load.month,2001)
+ j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+ for (d in 1:(length(my.cluster$cluster)-1)){
+ if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+ j1o <- j1o + 1
+ }
+ if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+ j2o <- j2o + 1
+ }
+ if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+ j3o <- j3o + 1
+ }
+ if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+ j4o <- j4o + 1
+ }
+
+ }
+
+ trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+ trans.obs[1,2] <- length(which(transition.obs1 == 2))
+ trans.obs[1,3] <- length(which(transition.obs1 == 3))
+ trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+ trans.obs[2,1] <- length(which(transition.obs2 == 1))
+ trans.obs[2,3] <- length(which(transition.obs2 == 3))
+ trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+ trans.obs[3,1] <- length(which(transition.obs3 == 1))
+ trans.obs[3,2] <- length(which(transition.obs3 == 2))
+ trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+ trans.obs[4,1] <- length(which(transition.obs4 == 1))
+ trans.obs[4,2] <- length(which(transition.obs4 == 2))
+ trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+ trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+ for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+ # compute the observed interannual monthly variability:
+ freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+ freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+ freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+ freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+ }
+
+ sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+ sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+ sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+ sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+ wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+ if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis'")}
+ if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+ if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
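+ ## The pairing below repeatedly takes the largest remaining value of cluster.corr
+ ## and retires its row and column. A compact equivalent (sketch; greedy.match is a
+ ## hypothetical helper, not used elsewhere in this script):
+ ## greedy.match <- function(m) {
+ ##   pairing <- integer(4)
+ ##   for (k in 1:4) {
+ ##     best <- which(m == max(m), arr.ind=TRUE)[1,]
+ ##     pairing[best[1]] <- best[2] # observed regime matched to simulated cluster best[1]
+ ##     m[best[1],] <- -Inf; m[,best[2]] <- -Inf
+ ##   }
+ ##   pairing
+ ## }
+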
cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + 
if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ n.dd <- length(my.cluster.vector)
+ for(d in 1:(n.dd-1)){
+ if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+ if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+ if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+ if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+ if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+ if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+ if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+ if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+ }
+
+ if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ persObs1 <- mean(sequ1[which(sequ1 != 0)])
+ persObs2 <- mean(sequ2[which(sequ2 != 0)])
+ persObs3 <- mean(sequ3[which(sequ3 != 0)])
+ persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+ ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+ ### forecast month: September
+ #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+ #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+ #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+ if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+ if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+ if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+ if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+ if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+ if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+ if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+ # regimes.sim # for the debug
+
+ ### forecast month: October
+ #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+ #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+ #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+ #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+ #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+ if(p
== 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + 
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig, period)) period <- period.orig
+ if(p.orig != p) p <- p.orig
+ if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+ } # close if on 'forecast.name'
+
+ if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+ if(save.names){
+ save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+ } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ }
+ }
+
+ # assign to each cluster its regime:
+ #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+ if(ordering == FALSE){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1])
+ #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2])
+ #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3])
+ #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4])
+
+ if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){
+ assign(paste0("imp",cluster1), varwr1mean)
+ assign(paste0("imp",cluster2), varwr2mean)
+ assign(paste0("imp",cluster3), varwr3mean)
+ assign(paste0("imp",cluster4), varwr4mean)
+
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+ }
+
+ if(fields.name == forecast.name && n.map < 7){
+ assign(paste0("freMax",cluster1), wr1yMax)
+ assign(paste0("freMax",cluster2), wr2yMax)
+ assign(paste0("freMax",cluster3), wr3yMax)
+ assign(paste0("freMax",cluster4), wr4yMax)
+
+ assign(paste0("freMin",cluster1), wr1yMin)
+ assign(paste0("freMin",cluster2), wr2yMin)
+ assign(paste0("freMin",cluster3), wr3yMin)
+ assign(paste0("freMin",cluster4), wr4yMin)
+
+ #assign(paste0("spatial.cor",cluster1), spat.cor1)
+ #assign(paste0("spatial.cor",cluster2), spat.cor2)
+ #assign(paste0("spatial.cor",cluster3), spat.cor3)
#assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR 
+ #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + 
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ ## # for the debug:
+ ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+ ## mes <- p
+ ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+ ## #plot(1981:2015,fre.obs,type="l")
+ ## #lines(1981:2015,fre1,type="l",col="red")
+ ## #lines(1981:2015,fre2,type="l",col="blue")
+ ## #lines(1981:2015,fre4,type="l",col="green")
+ ## cor1 <- cor(fre.obs, fre1)
+ ## cor2 <- cor(fre.obs, fre2)
+ ## cor3 <- cor(fre.obs, fre3)
+ ## cor4 <- cor(fre.obs, fre4)
+ ## round(c(cor1,cor2,cor3,cor4),2)
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.30
+ legend1.cex <- 2
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Subtitle centroid maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.02
+ title1.width <- 0.4
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+ mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+ mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+ mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+ mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+ # Impact maps:
+ if(impact.data == TRUE){ # note: we are already inside the composition == "simple" branch, so this only checks that impact data is available
+ impact.xpos <- 0.47
+ impact.width <- 0.26
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+ # Title impact maps:
+ title2.xpos <- 0.42
+ title2.width <- 0.35
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE)
+ mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE)
+ mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE)
+ mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE)
+ mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+ # Legend:
+ legend2.xpos <- 0.48
+ legend2.width <- 0.25
+ legend2.cex <- 1.8
+
+ my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos +
legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, 
sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else {
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'psl'
+
+ # Visualize the composition with the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else {
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'impact'
+
+ # Visualize the single impact maps, one per regime, for a selected forecasted month:
+ if(composition == "single.impact"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ dev.off()
+ }
+
+ # Visualize the single psl pattern maps, one per cluster, for a selected month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2)))
+ dev.off()
+
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, 
map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + } # close if on composition + + 
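+## Sketch: once all lead months have been processed, the frequency correlations saved
+## above could be gathered into one 4 x n.leads table per forecast month (same file-name
+## pattern as the save() calls; a scratch environment keeps the script's variables intact):
+## collect.spcor <- function(lm) {
+##   p.start <- forecast.month - lm; if(p.start < 1) p.start <- p.start + 12
+##   e <- new.env()
+##   load(paste0(work.dir,"/",forecast.name,"_",my.period[p.start],"_leadmonth_",lm,"_ClusterNames.RData"), envir=e)
+##   c(e$sp.cor1, e$sp.cor2, e$sp.cor3, e$sp.cor4)
+## }
+## sapply(6:0, collect.spcor)
+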
print("Finished!") +} # close if on composition != "summary" + +} + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+
+ assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+ # set a second WR classification (e.g. the one with the running cluster) to contrast with the new one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+
+ # load data of the reference season of the second classification (this line should be modified to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ cluster1.ref <- 3 # override the values above, because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR, for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR, for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map at the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ # Plot the Taylor diagrams:
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
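+
+ # The assign() calls below distribute the four stored patterns over the variables
+ # map1..map4 following the regime order in 'orden'; a commented-out equivalent with
+ # a plain list, to clarify what the dynamic variable names do (sketch):
+ #
+ # maps <- vector("list", 4)
+ # maps[[cluster1]] <- pslwr1mean; maps[[cluster2]] <- pslwr2mean
+ # maps[[cluster3]] <- pslwr3mean; maps[[cluster4]] <- pslwr4mean
+ # # after this, maps[[1]] plays the role of map1, maps[[2]] of map2, and so on
+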
assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { corr.first[2,p] <- 
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # arrays storing the indicators in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) # observed transition frequencies, filled below
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+ array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+ array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+
array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+ + array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking + array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge + + array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+ + array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO- + array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.obs3[p,1+l, 2] <- 
100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr.png"), width=550, height=400)
+
+ # create an array similar to array.spat.cor, but with colors instead of correlation values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.sp.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.sp.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.sp.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.sp.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.sp.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.sp.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.sp.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.sp.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
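+
+ # The manual thresholds above can also be written with base R's cut(), which bins
+ # each value into one of the intervals defined by my.seq in a single call
+ # (commented-out sketch, equivalent for the same breaks):
+ #
+ # bin <- cut(array.spat.cor, breaks=my.seq, labels=FALSE, right=FALSE, include.lowest=TRUE)
+ # array(sp.corr.cols[bin], dim=dim(array.spat.cor))  # same content as array.sp.cor.colors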
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.sp.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.sp.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.sp.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_1table.png"), width=800, height=300)
+
+ plot.new()
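+
+ # Note on the "_1table" layouts below: rows are target months and columns lead times,
+ # while the summary arrays are indexed by startdate, so each cell is read back with
+ # i2 = ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12), i.e. the target month i shifted back
+ # by the lead time (6-j), wrapping around the 12 startdates. A commented-out
+ # equivalent with modular arithmetic (sketch):
+ #
+ # target.to.startdate <- function(i, lead) ((i - lead - 1) %% 12) + 1
+ # # e.g. target.to.startdate(2, 6) is 8: the February target at lead 6 comes from the August startdate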
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.sp.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+
+
+
+
+ # Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+ # create an array similar to array.cor, but with colors instead of correlation values:
+ corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1)
+
+ array.cor.colors <- array(corr.cols[8],c(12,7,4))
+ array.cor.colors[array.cor < 0.75] <-
corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, 
cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) 
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, 
type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Delta freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + 
mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, 
-0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("S4 Regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
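+
+ # NOTE: in the "_1table" figures above, i2 recovers the startdate month from the
+ # target month i and the lead time 6-j, wrapping months <= 0 back into the
+ # previous year. A minimal stand-alone check of that mapping (the function below
+ # is illustrative only and is not used by the plotting code):
+ startdate.month <- function(i, j) { m <- i - (6 - j); ifelse(m > 0, m, m + 12) }
+ stopifnot(startdate.month(1, 0) == 7)    # a January target at 6-month lead starts in July
+ stopifnot(startdate.month(12, 6) == 12)  # at lead time 0 the startdate equals the target month
+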
+
+ # Delta persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400)
+
+ # create an array similar to array.pers but with colors instead of frequencies:
+ diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA)
+
+ array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4))
+ array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7]
+ array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6]
+ array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5]
+ array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4]
+ array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3]
+ array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2]
+ array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2])
+     polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3])
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
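+
+ # NOTE: the cascading "[x < my.seq[k]] <-" blocks above and below map values onto a
+ # colour scale by thresholds; the same binning can be written once with base
+ # findInterval(). A minimal unused sketch (NAs painted white, as in the transition
+ # blocks further down; the persistence blocks assume no NAs):
+ bin.colors <- function(x, breaks, cols) {
+   # inner thresholds are breaks[2]..breaks[length(breaks)-1]; values below breaks[2]
+   # get cols[1], values at or above the last inner threshold get cols[length(cols)]
+   out <- array(cols[findInterval(x, breaks[2:(length(breaks) - 1)]) + 1], dim = dim(x))
+   out[is.na(x)] <- "white"
+   out
+ }
+ # e.g.: array.diff.sd.freq.colors <- bin.colors(array.diff.sd.freq, my.seq, diff.sd.freq.cols)
+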
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
+
+ # Impact summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400)
+
+ # create an array similar to array.pers but with colors instead of frequencies:
+ imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.imp.colors <- array(imp.cols[8],c(12,7,4))
+ array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7]
+ array.imp.colors[array.imp < my.seq[7]] <- imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2])
+     polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3])
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
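+
+ # NOTE: every "_4tables" figure above positions its panels with hand-tuned
+ # par(fig=...) rectangles; layout() is the base-graphics way to declare such a
+ # grid once. A minimal unused sketch with empty frames only (not a drop-in
+ # replacement for the exact coordinates used here):
+ four.panel.sketch <- function(titles=c("NAO+", "Blocking", "NAO-", "Atlantic Ridge")) {
+   layout(matrix(1:4, nrow=2, byrow=TRUE))   # 2x2 grid, one region per regime
+   for(k in 1:4){
+     plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+     mtext(titles[k], cex=1.5)
+   }
+   layout(1)   # back to a single plotting region
+ }
+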
+
+ # Simulated Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
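+
+ # NOTE: for reference, transition percentages of the kind stored in the
+ # array.trans.sim*/array.trans.obs* arrays can be derived from a series of
+ # consecutive daily regime classifications; a minimal stand-alone sketch
+ # (assumes `regimes` is such a vector; not used by the plotting code):
+ transition.pct <- function(regimes) {
+   from <- regimes[-length(regimes)]   # regime on day d
+   to   <- regimes[-1]                 # regime on day d+1
+   100 * prop.table(table(from, to), margin=1)   # row-wise percentages
+ }
+ # e.g.: transition.pct(c("NAO+","NAO+","Blocking","NAO-","NAO+"))
+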
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_obs_target_month.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Obs.transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), 
fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = 
+
+ # Simulated transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ # Transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Transition from blocking regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
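+
+ # NOTE: every panel of the transition summaries repeats the same empty-frame setup
+ # (plot + axis titles + month/lead axes); a small unused helper with those shared
+ # defaults, sketched here for reference only:
+ start.startdate.panel <- function(cex=1.5) {
+   plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+   mtext(side = 1, text = "Startdate", line = 4, cex=cex)
+   mtext(side = 2, text = "Lead time", line = 2.5, cex=cex)
+   axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=cex)
+   axis(2, at=seq(0,6), las=2, cex.axis=cex, labels=0:6)
+ }
+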
array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), 
new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- 
rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 
0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), 
new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead 
time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 
0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from 
Atlantic Ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from Atlantic Ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 
0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target 
month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # chose an index: 1: most influent, 2: most influent positively, 3: most influent negatively + + if ( index == 
+ if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650)
+ if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650)
+
+ plot.new()
+ #par(mfrow=c(4,3))
+ #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+ col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+ # position of the longitude values of Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ lon.max = 45
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ cluster1 <- which(orden == cluster1.name.period[p]) # in the future it will be == cluster1.name
+ cluster2 <- which(orden == cluster2.name.period[p])
+ cluster3 <- which(orden == cluster3.name.period[p])
+ cluster4 <- which(orden == cluster4.name.period[p])
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ # most influential WT:
+ if (index == 1) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+ if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+ if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+ if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
+ # most influential WT with positive impact:
+ if (index == 2) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+
+ }
+
+ # most influential WT with negative impact:
+ if (index == 3) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
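+ # Editor's note (a sketch, not part of the original script): the three nested-loop blocks
+ # above can be collapsed with which.max()/which.min() over a stack of the four impact maps.
+ # One subtle difference: the ">=" chains keep the last cluster in case of ties, while
+ # which.max() keeps the first.
+ if(FALSE){
+   imp.stack <- array(c(imp1, imp2, imp3, imp4), dim=c(dim(imp1), 4))
+   if(index == 1) imp.all[] <- apply(abs(imp.stack), c(1,2), which.max) + 0.5 # most influential
+   if(index == 2) imp.all[] <- apply(imp.stack, c(1,2), which.max) + 0.5     # most positive
+   if(index == 3) imp.all[] <- apply(imp.stack, c(1,2), which.min) + 0.5     # most negative
+ }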
+ if(p<=3) yMod <- 0.7
+ if(p>=4 && p<=6) yMod <- 0.5
+ if(p>=7 && p<=9) yMod <- 0.3
+ if(p>=10) yMod <- 0.1
+ if(p==13 || p==14) yMod <- 0.52
+ if(p==15 || p==16) yMod <- 0.1
+
+ if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+ if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+ if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+ if(p==13 || p==15) xMod <- 0.05
+ if(p==14 || p==16) xMod <- 0.50
+
+ # impact map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+ if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+ PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+ # title map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+ if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+ mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact.highest"
+
diff --git a/old/weather_regimes_maps_v22.R~ b/old/weather_regimes_maps_v22.R~
new file mode 100644
index 0000000000000000000000000000000000000000..92542b05c634afd61ae1a31738b2ee37daebb53a
--- /dev/null
+++ b/old/weather_regimes_maps_v22.R~
@@ -0,0 +1,4098 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+#
+# I/O: input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below)
+# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis.
+# Then, you can associate the reanalysis to the forecast system, to employ it to automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'):
+fields.name <- forecast.name
+
+# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12 # You need to have created the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period beforehand.
+
+# Run it twice: once with 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple"'
+# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script
+# a second time, one month/season at a time, with 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple"'
+# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set
+# 'ordering = TRUE' and 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
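+# Editor's sketch (not part of the original script): the three flag combinations from the
+# "run it twice" note above, shown side by side; kept inside if(FALSE) so it never runs.
+if(FALSE){
+  ordering <- FALSE; save.names <- TRUE;  as.pdf <- TRUE  # pass 1 (composition "simple"): inspect the unordered clusters
+  ordering <- TRUE;  save.names <- TRUE;  as.pdf <- FALSE # pass 2: regime names set below, save the ordered maps
+  ordering <- TRUE;  save.names <- FALSE                  # later runs: reuse the stored cluster order
+}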
+composition <- "summary" # choose which kind of composition you want to plot:
+                         # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                         # - 'psl' for all the regime anomalies for a fixed forecast month
+                         # - 'fre' for all the interannual frequencies for a fixed forecast month
+                         # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+                         # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files]
+                         # - 'impact.highest' for all the impact plots of the regime with the highest impact
+                         # - 'single.impact' to save the individual impact maps
+                         # - 'single.psl' to save the individual psl map
+                         # - 'single.fre' to save the individual fre map
+                         # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+                         # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot
+
+# Associate the regimes with the four clusters:
+cluster3.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster1.name <- "Blocking"
+cluster4.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
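+# Editor's sketch (not part of the original script): given the names above, the cluster
+# drawn in each row of the final composition is a simple lookup:
+if(FALSE){
+  cluster.names <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+  match(orden, cluster.names) # e.g. 3 2 1 4: row 1 (NAO+) comes from cluster 3, and so on
+}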
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+                   # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
+for(forecast.month in 1:12){
+####### Derived variables ###############################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+var.num.orig <- var.num # loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+work.dir.orig <- work.dir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+n.map <- 0
+########################################################################################################################################################
+
+if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){
+
+  if(composition == "psl" || composition == "fre" || composition == "impact") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0)
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2)
+
+  }
+
+  if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once!
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple"){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    impact.data <- FALSE
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+        # load impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for reanalysis")
+          load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+        } else {
+          impact.data <- FALSE
+          print("Impact data for reanalysis not available")
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 + 1 + } + + } + } + } + + trans.sim <- matrix(NA, 4, 4, dimnames=list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- 
length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 # the days of each spell (the first one included) are counted inside the loop below + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] != 4) i4 <- 0 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + 
# compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs2[j2o] <- my.cluster$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs3[j3o] <- my.cluster$cluster[d + 1] + j3o <- j3o + 1 + } + if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs4[j4o] <- my.cluster$cluster[d + 1] + j4o <- j4o + 1 + } + + } + + trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + 
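+ # Illustrative toy check (self-contained; its results are not used anywhere below): after the row normalisation above, trans.obs[i,j] estimates P(next regime = j | current regime = i); the same counting can be reproduced with base R on a short synthetic series: + toy.series <- c(1,1,2,3,3,1,4,4,2) + toy.moves <- diff(toy.series) != 0 # TRUE at each regime transition + toy.trans <- table(from=head(toy.series,-1)[toy.moves], to=tail(toy.series,-1)[toy.moves]) + toy.trans.prob <- prop.table(toy.trans, margin=1) # row-normalised, like trans.sim and trans.obs + # similarly, the persistence loops compute the mean length of the consecutive same-regime spells, which on a plain vector is equivalent to run-length encoding, e.g. for regime 1: mean(rle(toy.series)$lengths[rle(toy.series)$values == 1])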
cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime, taking the pairs with the highest correlation first (greedy matching): + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) # labelled copy of the correlations (overwrites the masked matrix above) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 # as above, the days of each spell are counted inside the loop below + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] != 4) i4 <- 0 + 
n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p 
== 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # simulated regimes after the manual corrections + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, since they may have changed after the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data (they were overwritten by the reanalysis loads above) + 
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the original lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + 
#assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR 
+ #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max <- 100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k <- k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + }
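+ # Illustrative alternative (commented out; it assumes 'fre1' holds one value per year of the 'years' vector, in the same order): the NA-padding loop above can also be written with match(), e.g. for the first regime: + # fre1.NA.alt <- fre1[match(year.start:year.end, years)] # NA wherever a year is missing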
# Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + 
title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE){ # if no impact data is available, these maps are skipped, leaving an empty space in the composition + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + 
legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, 
sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else { + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition of the impact maps for a selected forecast month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else { + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Save the individual impact maps (one png per regime) for a selected forecast month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Save the individual psl pattern maps (one png per cluster) for a selected forecast month; note that withinss1 to withinss4 must be available here (see the commented assign() calls above): + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + 
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, 
map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + } # close if on composition + + 
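+
+ # A quick way to inspect one of the .RData files saved above from an interactive
+ # session (sketch only, hence commented out; the startdate and lead month below
+ # are placeholders):
+ # loaded <- load(paste0(work.dir,"/",fields.name,"_",my.period[1],"_leadmonth_0_ClusterNames.RData"))
+ # print(loaded) # names of the objects restored into the workspace
+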
print("Finished!") +} # close if on composition != "summary" + +} + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+
+ # set a second WR classification (i.e., one with running clustering) to contrast with the new one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+
+ # load the reference season of the second classification (edit these paths to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ cluster1.ref <- 3 # override the values above: for dataset num.46 the blocking and Atlantic ridge regimes were saved swapped
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR, for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR, for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue-white-red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load pslwrXmean data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map at the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ # Plot the Taylor diagrams:
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load pslwrXmean data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ # load data from the second classification:
+ load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load pslwrXmean data
+ load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1.old <- which(orden == cluster1.name)
+ cluster2.old <- which(orden == cluster2.name)
+ cluster3.old <- which(orden == cluster3.name)
+ cluster4.old <- which(orden == cluster4.name)
+
+ assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+ assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+ assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+ assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+ add.mod <- ifelse(p == 1, FALSE, TRUE)
+ main.mod <- ifelse(p == 1, orden[regime],"")
+
+ color.first <- "red"
+ color.second <- "blue"
+ sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # note: the sd.arcs values seem to be ignored by the plot
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+
+ # add the points from the second classification:
+ add.mod <- TRUE
+ if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+
+ if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) }
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ # mean skill over the 12 startdates, for each regime:
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
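+
+# RMSE() is assumed to be provided by Rfunctions.R; a minimal equivalent, kept
+# under a different name so it cannot shadow the sourced definition:
+rmse.demo <- function(x, y) sqrt(mean((x - y)^2))
+stopifnot(rmse.demo(c(0, 0), c(3, 4)) == sqrt(12.5))
+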
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # arrays storing the indicators in the format [startdate, leadmonth, regime]:
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ with the first triangle (left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- with the third triangle (right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates blocking with the fourth triangle (top)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.ridge with the second triangle (bottom)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ with the first triangle (left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- with the third triangle (right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates blocking with the fourth triangle (top)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.ridge with the second triangle (bottom)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+ array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+ array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+ array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+ array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+ array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+ array.pers[p,1+l, 1] <- persist1 # NAO+
+ array.pers[p,1+l, 3] <- persist2 # NAO-
+ array.pers[p,1+l, 4] <- persist3 # Blocking
+ array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+ array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+ array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+ array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ array.imp[p,1+l, 1] <- cor.imp1 # NAO+
+ array.imp[p,1+l, 3] <- cor.imp2 # NAO-
+ array.imp[p,1+l, 4] <- cor.imp3 # Blocking
+ array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
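+
+ # Each summary array above is indexed [startdate, 1 + lead month, triangle],
+ # where triangles 1/2/3/4 are drawn at the left/bottom/right/top of a cell and
+ # hold NAO+/Atl.ridge/NAO-/blocking, respectively. The "1table" plots below
+ # re-index startdates to target months: for target month i plotted in column j
+ # (lead time 6-j), the startdate is i-(6-j), wrapped into 1..12. A toy check
+ # of that wrap (demo function only, removed right away):
+ target.demo <- function(i, j) { s <- i - (6 - j); if(s > 0) s else s + 12 }
+ stopifnot(target.demo(1, 6) == 1, target.demo(1, 5) == 12, target.demo(3, 4) == 1)
+ rm(target.demo)
+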
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr.png"), width=550, height=400)
+
+ # create an array similar to array.spat.cor but with colors instead of correlation values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_4tables.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0,
0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spat_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j 
in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_corr_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + 
mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Freq. 
summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+
+ # Delta freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_freq_1table.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0,
i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + 
axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, 
cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Delta persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_diff_pers_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. 
between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- 
diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5) # fixed: var.name[var.num] was passed as the 'side' argument of mtext() instead of being pasted into the title + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}} + + 
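+ # Editor's note: every summary in this script builds its colour array with seven
+ # cascading "array.X.colors[array.X < my.seq[k]] <- cols[k-1]" assignments, which
+ # is hand-rolled interval binning. A minimal equivalent sketch follows;
+ # bin.colors() is a hypothetical helper, not part of the original script:
+ bin.colors <- function(x, breaks, cols, na.col=cols[length(cols)]){
+   # use only the interior breakpoints (breaks[2]..breaks[8]): values below
+   # breaks[2] map to cols[1], values >= breaks[8] map to cols[8], and a value
+   # exactly on a breakpoint falls in the upper bin, like the strict "<" cascade
+   out <- array(cols[1 + findInterval(x, breaks[2:(length(breaks)-1)])], dim=dim(x))
+   out[is.na(x)] <- na.col   # the transition summaries would pass na.col="white"
+   out
+ }
+ # e.g. array.imp.colors <- bin.colors(array.imp, my.seq, imp.cols) should
+ # reproduce the seven assignments at the top of this figure in one call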
par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2) # fixed: var.name[var.num] was passed as the 'side' argument of mtext() instead of being pasted into the title + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, 
labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + 
par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + # editor's note: this 4-panel table originally reused the same "_1table" filename as the 3-panel table further below, so the first file was silently overwritten; a "_4panels" suffix is added here (assumed fix) so both outputs survive + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table_4panels.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, 
-0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO+_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", 
line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_NAO-_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + 
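+ # (layout note: in these multi-panel figures each panel is placed with
+ # par(fig=c(x1,x2,y1,y2)), i.e. its corners in normalised device coordinates
+ # spanning 0..1 over the whole page, and new=TRUE overlays it on the current
+ # page instead of opening a new one)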
mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_blocking_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), 
c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 
0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_trans_Atlantic_ridge_1table.png"), width=600, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Transition from Atlantic Ridge regime", cex=1.2) + + 
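+ # In the "_1table" layout below, rows are target months and columns are lead
+ # times (the x axis is labelled 6:0, so column j shows lead time 6-j); i2
+ # recovers the startdate month as i-(6-j), wrapped into 1..12. Worked example:
+ # target month i=2 (Feb) in column j=2 (lead 4) reads from startdate
+ # i2 = 2-4+12 = 10 (October).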
par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.28, 0.56, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.28, 0.56, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.56, 0.84, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.56, 0.84, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.88, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Bias of the Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, 
text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 
0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO+_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + + + # Bias of the Transition probability 
from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 
0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_NAO-_1table.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, 
labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from 
blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_blocking_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 
1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + 
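+ # Note: the repeated threshold assignments above (here and in the analogous blocks for the other
+ # arrays) just bin values into colors; they could be collapsed with findInterval(). A minimal
+ # equivalent sketch ('bin.colors' is a hypothetical helper, not used below):
+ # bin.colors <- function(x, brks, cols){
+ #   cc <- cols[findInterval(x, brks[2:(length(brks)-1)]) + 1]  # bin each value into a color
+ #   cc[is.na(x)] <- "white"                                    # NA cells are drawn in white
+ #   array(cc, dim(x))
+ # }
+ # array.trans.diff4.colors <- bin.colors(array.trans.diff4, my.seq, trans.cols)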
par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_4tables.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text =
"Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_trans_Atlantic_ridge_1table.png"), width=800, height=300) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), 
col=array.trans.diff4.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +diagnostic.WR <- function(text=NULL, position=c(0,1,0,1), color.array){ + +} + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the stronger WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # chose an index: 1: most influent, 2: most influent positively, 3: most influent negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! 
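+ # The nested-loop blocks further below choose, at each grid point, the regime whose impact is
+ # largest (in absolute value, positively or negatively, depending on 'index'). An equivalent
+ # vectorized sketch for index == 1 (assumes imp1..imp4 share the same dimensions; ties go to
+ # the first regime here instead of the last):
+ # imp.stack <- array(c(abs(imp1), abs(imp2), abs(imp3), abs(imp4)), c(dim(imp1), 4))
+ # imp.all <- apply(imp.stack, c(1,2), which.max) + 0.5   # values 1.5, 2.5, 3.5 or 4.5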
+ + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + #most influent WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + #most influent WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influent WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, 
draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + diff --git a/old/weather_regimes_maps_v23.R b/old/weather_regimes_maps_v23.R new file mode 100644 index 0000000000000000000000000000000000000000..4d047ddf8d1af35c5fa78c1b59ec484e0df18f5a --- /dev/null +++ b/old/weather_regimes_maps_v23.R @@ -0,0 +1,4932 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below) +# that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, the user may find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, 'ordering' has to be changed +# from F to T (see below) and the four variables 'clusterX.name' (X = 1...4) set to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, to employ it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
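+#
+# For example (a hypothetical pass; the actual names depend on what the .pdf shows): run once
+# with 'ordering <- FALSE; save.names <- TRUE; as.pdf <- TRUE; composition <- "simple"', open
+# the resulting .pdf and, if its maps appear as Blocking / NAO- / Atl.Ridge / NAO+ from top to
+# bottom, set cluster1.name <- "Blocking", cluster2.name <- "NAO-", cluster3.name <- "Atl.Ridge",
+# cluster4.name <- "NAO+" and rerun with 'ordering <- TRUE'.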
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- forecast.name + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 1:12 # You need to have created the '_psl.RData' file beforehand with the output of 'weather_regimes_vXX.R' for that period. + +# Run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" ' +# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script +# a second time one month/season at a time with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" ' +# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot) so as not to overwrite the files that store the right cluster order. + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "summary" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.)
[You need all ClusterNames files] + # - 'impact.highest' for all the impact plots of the regime with the highest impact + # - 'single.impact' to save the individual impact maps + # - 'single.psl' to save the individual psl map + # - 'single.fre' to save the individual fre map + # - 'none' doesn't plot anything, it only saves the ClusterName.RData files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + +var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple', choose a variable for the impact maps 1: sfcWind 2: tas + +mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot + +# Associate the regimes with the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a } at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +for(forecast.month in 1:12){ +####### Derived variables ############################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for debugging + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + impact.data <- FALSE + for(p in period){ + #p <- period[1] # for debugging + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month') + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available; if not, it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for reanalysis") + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print("Impact data for reanalysis not available") + } + } + + if(fields.name == forecast.name && n.map < 7){ +
load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 + 1 + } + + } + } + } + + trans.sim <- matrix(NA, 4, 4, dimnames = list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
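+ # The transition counts above could also be obtained with table() on consecutive-day pairs;
+ # a minimal equivalent sketch (same day/year/member layout as my.cluster.array[[p]]; pairs
+ # never cross a year or member boundary because only dimension 1 is shifted):
+ # from <- my.cluster.array[[p]][-n.days.month, , ]
+ # to   <- my.cluster.array[[p]][-1, , ]
+ # keep <- from != to   # only regime changes count as transitions
+ # trans.sim <- prop.table(table(factor(from[keep], 1:4), factor(to[keep], 1:4)), 1)   # rows sum to 1; diagonal is 0 rather than NA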
+trans.sim[4,1] <- length(which(transition4 == 1))
+trans.sim[4,2] <- length(which(transition4 == 2))
+trans.sim[4,3] <- length(which(transition4 == 3))
+
+trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
+
+# compute cluster persistence:
+my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1year1member1, day2year1member1, ..., day1year2member1, day2year2member1, ...
+
+sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+i1 <- i2 <- i3 <- i4 <- 1
+
+if(my.cluster.vector[1] != 1) i1 <- 0
+if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+if(my.cluster.vector[1] != 2) i2 <- 0
+if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+if(my.cluster.vector[1] != 3) i3 <- 0
+if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+if(my.cluster.vector[1] != 4) i4 <- 0
+if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+n.dd <- length(my.cluster.vector)
+
+for(d in 1:(n.dd-1)){
+if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+}
+
+if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+pers1 <- mean(sequ1[which(sequ1 != 0)])
+pers2 <- mean(sequ2[which(sequ2 != 0)])
+pers3 <- mean(sequ3[which(sequ3 != 0)])
+pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+# compute correlations between simulated clusters and observed regimes' anomalies:
+cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+
+if(composition == 'impact' && impact.data == TRUE) {
+cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean
+#cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4
+}
+
+lat.forecast <- lat; lon.forecast <- lon
+
+if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # also load the reanalysis regime names
+
+# load reanalysis varwrXmean impact data:
+if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script
+if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script
+if(!identical(period.orig,period)) period <- period.orig
+if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original value of this script
+if(p.orig != p) p <- p.orig
+
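+# Illustrative sketch (base R only, kept commented out so it does not alter the script):
+# the transition counts and the mean persistence computed above with manual counters can
+# be written compactly. Both hypothetical helpers assume a vector of daily cluster
+# labels in 1..4; note that rle(), like the counters, merges runs that cross the
+# year/member concatenation boundaries:
+# transition.probs <- function(cl, n.wr=4) {
+#   tt <- table(factor(head(cl,-1), levels=1:n.wr), factor(tail(cl,-1), levels=1:n.wr))
+#   diag(tt) <- 0            # self-transitions are not counted, as above
+#   tt / rowSums(tt)         # row-normalised transition probabilities
+# }
+# mean.persistence <- function(cl) {
+#   r <- rle(cl)                       # runs of consecutive equal labels
+#   tapply(r$lengths, r$values, mean)  # mean spell length (in days) per regime
+# }
+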
+# compute the observed transition probabilities:
+n.days.month <- n.days.in.a.period(load.month,2001)
+j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+for (d in 1:(length(my.cluster$cluster)-1)){
+if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+j1o <- j1o + 1
+}
+if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+j2o <- j2o + 1
+}
+if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+j3o <- j3o + 1
+}
+if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+j4o <- j4o + 1
+}
+
+}
+
+trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"), c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+trans.obs[1,2] <- length(which(transition.obs1 == 2))
+trans.obs[1,3] <- length(which(transition.obs1 == 3))
+trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+trans.obs[2,1] <- length(which(transition.obs2 == 1))
+trans.obs[2,3] <- length(which(transition.obs2 == 3))
+trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+trans.obs[3,1] <- length(which(transition.obs3 == 1))
+trans.obs[3,2] <- length(which(transition.obs3 == 2))
+trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+trans.obs[4,1] <- length(which(transition.obs4 == 1))
+trans.obs[4,2] <- length(which(transition.obs4 == 2))
+trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+# compute the observed interannual monthly variability:
+freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+for(y in year.start:year.end){
+# for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+}
+
+sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
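+# Illustrative sketch (base R, kept commented out so it does not alter the script): the
+# 16 cor() calls and the repeated "find the maximum, then blank its row and column" steps
+# that follow are a greedy one-to-one assignment on a pattern-correlation matrix; the
+# same result can be obtained with this hypothetical helper:
+# greedy.match <- function(sim.maps, obs.maps) {  # two lists of 4 anomaly maps
+#   cc <- cor(sapply(sim.maps, as.vector), sapply(obs.maps, as.vector))  # 4x4 in one call
+#   pairing <- rep(NA_integer_, nrow(cc))
+#   for(k in seq_len(nrow(cc))) {
+#     m <- which(cc == max(cc), arr.ind=TRUE)[1,]  # best remaining pair
+#     pairing[m[1]] <- m[2]
+#     cc[m[1],] <- -Inf; cc[,m[2]] <- -Inf         # exclude this row and column
+#   }
+#   pairing  # pairing[i] = observed regime associated to simulated cluster i
+# }
+# A globally optimal alternative would be the Hungarian method, e.g.
+# clue::solve_LSAP(cor.matrix + 1, maximum=TRUE), if the 'clue' package is available
+# (the +1 shift makes the correlations non-negative, as solve_LSAP requires).
+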
cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + 
if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+n.dd <- length(my.cluster.vector)
+for(d in 1:(n.dd-1)){
+if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+}
+
+if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+persObs1 <- mean(sequ1[which(sequ1 != 0)])
+persObs2 <- mean(sequ2[which(sequ2 != 0)])
+persObs3 <- mean(sequ3[which(sequ3 != 0)])
+persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+### forecast month: September
+#if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+#if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+#if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+#if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+# regimes.sim # for the debug
+
+### forecast month: October
+#if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+#if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+#if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+#if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+#if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+if(p
== 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + 
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps
+
+if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script
+if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script
+if(!identical(period.orig, period)) period <- period.orig
+if(p.orig != p) p <- p.orig
+if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original value of this script
+
+} # close if on 'forecast.name'
+
+if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+if(save.names){
+save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+} else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script
+if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script
+}
+}
+
+# assign to each cluster its regime:
+#if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+if(ordering == FALSE){
+cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+} else {
+cluster1 <- which(orden == cluster1.name)
+cluster2 <- which(orden == cluster2.name)
+cluster3 <- which(orden == cluster3.name)
+cluster4 <- which(orden == cluster4.name)
+}
+
+assign(paste0("map",cluster1), pslwr1mean)
+assign(paste0("map",cluster2), pslwr2mean)
+assign(paste0("map",cluster3), pslwr3mean)
+assign(paste0("map",cluster4), pslwr4mean)
+
+assign(paste0("fre",cluster1), wr1y)
+assign(paste0("fre",cluster2), wr2y)
+assign(paste0("fre",cluster3), wr3y)
+assign(paste0("fre",cluster4), wr4y)
+
+#assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1])
+#assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2])
+#assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3])
+#assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4])
+
+if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){
+assign(paste0("imp",cluster1), varwr1mean)
+assign(paste0("imp",cluster2), varwr2mean)
+assign(paste0("imp",cluster3), varwr3mean)
+assign(paste0("imp",cluster4), varwr4mean)
+
+assign(paste0("sig",cluster1), pvalue1)
+assign(paste0("sig",cluster2), pvalue2)
+assign(paste0("sig",cluster3), pvalue3)
+assign(paste0("sig",cluster4), pvalue4)
+}
+
+if(fields.name == forecast.name && n.map < 7){
+assign(paste0("freMax",cluster1), wr1yMax)
+assign(paste0("freMax",cluster2), wr2yMax)
+assign(paste0("freMax",cluster3), wr3yMax)
+assign(paste0("freMax",cluster4), wr4yMax)
+
+assign(paste0("freMin",cluster1), wr1yMin)
+assign(paste0("freMin",cluster2), wr2yMin)
+assign(paste0("freMin",cluster3), wr3yMin)
+assign(paste0("freMin",cluster4), wr4yMin)
+
+#assign(paste0("spatial.cor",cluster1), spat.cor1)
+#assign(paste0("spatial.cor",cluster2), spat.cor2)
+#assign(paste0("spatial.cor",cluster3), spat.cor3)
+
#assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR 
+ #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + 
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + 
title1.width, 0.9305, 0.9355), new=TRUE)
+mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+# Impact maps:
+if(impact.data == TRUE){ # it should never enter here, because we are already inside an if with composition == "simple"
+impact.xpos <- 0.47
+impact.width <- 0.26
+par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+# Title impact maps:
+title2.xpos <- 0.42
+title2.width <- 0.35
+par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE)
+mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE)
+mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE)
+mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE)
+mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+# Legend:
+legend2.xpos <- 0.48
+legend2.width <- 0.25
+legend2.cex <- 1.8
+
+my.subset2 <- match(values.to.plot2, my.brks.var)
+par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+par(fig=c(legend2.xpos, legend2.xpos +
legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
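+# Note on the "paste0 doesn't work inside an expression" remark above: plotmath labels
+# must be built with bquote() (or substitute()), where .(x) splices an evaluated value
+# into the expression; that is how the mean-frequency annotations below combine computed
+# text with the plotmath symbol bar(nu), e.g.:
+# mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))), cex=4.5)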
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, 
sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + 
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, 
map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + } # close if on composition + + 
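+# Design note (sketch, kept commented out so it does not alter the script): the save()
+# calls earlier in this block write dozens of loose objects per startdate/lead time;
+# bundling them into one named list with saveRDS() would avoid the name clashes that
+# force all the "restore the original value" lines in this script, e.g.:
+# indicators <- list(orden=orden, maps=list(map1,map2,map3,map4),
+#                    fre=list(fre1.NA,fre2.NA,fre3.NA,fre4.NA))
+# saveRDS(indicators, paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_indicators.rds"))
+# ind <- readRDS(file)  # returns the list without touching any other variable
+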
print("Finished!") +} # close if on composition != "summary" + +} + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regimes: + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, 
lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, that with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + # Plot the Taylor diagrams: + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + 
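+ # The four which() calls above recover the position of each named cluster inside
+ # 'orden'; the assign() calls below then relabel pslwr1mean..pslwr4mean as
+ # map1..map4, so the maps always follow the fixed order of 'orden' whatever order
+ # the clustering happened to output. A toy sketch of the pattern (all names here
+ # are stand-ins, not objects of this script):
+ if(FALSE){ # example only, not run
+   orden.ex <- c("NAO+","NAO-","Blocking","Atl.Ridge")   # target order
+   saved.ex <- c("Blocking","NAO+","Atl.Ridge","NAO-")   # order found in the file
+   means.ex <- list(10, 20, 30, 40)                      # stand-ins for pslwrXmean
+   for(k in 1:4) assign(paste0("map.ex", which(orden.ex == saved.ex[k])), means.ex[[k]])
+   # now map.ex1 == 20 (NAO+), map.ex2 == 40 (NAO-), map.ex3 == 10, map.ex4 == 30
+ }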
assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { corr.first[2,p] <- 
cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking + 
array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
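+ # transSimX[j] and transObsX[j], unpacked below, are read as the probability of
+ # moving from regime X to regime j. A minimal sketch of how such transition
+ # probabilities can be estimated from a daily regime sequence (toy data only;
+ # not necessarily the estimator behind the values loaded from the .RData files):
+ if(FALSE){ # example only, not run
+   set.seed(1)
+   seq.reg <- sample(1:4, 300, replace=TRUE)            # toy daily classification
+   from <- head(seq.reg, -1); to <- tail(seq.reg, -1)
+   keep <- from != to                                   # count only the days where the regime changes
+   trans.counts <- table(factor(from[keep], levels=1:4), factor(to[keep], levels=1:4))
+   trans.prob <- prop.table(trans.counts, margin=1)     # each row sums to 1
+   print(round(trans.prob, 2))
+ }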
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr. values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), 
las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), 
fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Bias of the mean S4 frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Bias of the mean S4 frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Bias of the mean S4 frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1,
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 spatial correlation of impact on ",var.name[var.num], cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}} + + 
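+# Atlantic Ridge panel (bottom-right block of the 2x2 startdate layout):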
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+
+# Simulated transition probability from NAO+ to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+# create an array similar to array.trans.sim1 but with colors instead of transition probabilities:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
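+# lead-time axis above is reversed (6..0), so columns advance towards the target month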
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+mtext(side = 1, text = "%", line = 2, cex=1)
+
+dev.off()
+
+
+# Observed transition probability from NAO+ to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400)
+
+# create an array similar to array.trans.obs1 but with colors instead of transition probabilities:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1]
+array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Observed transition from NAO+ regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Observed transition from NAO+ regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}}
+
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}}
+
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+mtext("% Observed transition from NAO+ regime", cex=1.2)
+
+par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
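+# y-axis below: one row for each of the 12 target months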
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO- regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+   }
+ }
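+ # The four polygon() calls above split each startdate/lead-time cell into a
+ # left, bottom, right and top triangle, one per value of the fourth index of
+ # the color array (judging from the panel labels of the split figures below:
+ # 1 = NAO+, 2 = Atlantic ridge, 3 = NAO-, 4 = blocking). A sketch of a reusable
+ # helper (hypothetical name, not called below; the explicit loops are kept):
+ draw_tile <- function(x, y, cols4) {
+   polygon(c(x-0.5, x-0.5, x), c(y-0.5, y+0.5, y), col = cols4[1])  # left
+   polygon(c(x-0.5, x, x+0.5), c(y-0.5, y, y-0.5), col = cols4[2])  # bottom
+   polygon(c(x, x+0.5, x+0.5), c(y, y+0.5, y-0.5), col = cols4[3])  # right
+   polygon(c(x-0.5, x, x+0.5), c(y+0.5, y, y+0.5), col = cols4[4])  # top
+ }
+ # for(p in 1:12) for(l in 0:6) draw_tile(p, l, array.trans.sim2.colors[p, 1+l, ])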
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
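+ # The block of par()/plot()/mtext()/axis()/polygon() lines drawing each square
+ # panel above is repeated verbatim for every regime pair. A sketch of a helper
+ # that would factor it out (hypothetical name, not called below; the explicit
+ # blocks are kept for fidelity with the original script):
+ panel_startdate <- function(fig, fig.title, label, cols, months, cex = 1.5) {
+   par(mar = c(0, 0, 4, 0), fig = fig.title, new = TRUE)
+   mtext(label, cex = cex)
+   par(mar = c(0, 4, 1, 0), fig = fig, new = TRUE)
+   plot(1, 1, type = "n", xaxt = "n", yaxt = "n",
+        xlim = c(1, 12.04), ylim = c(-0.23, 6.2), ann = FALSE)
+   mtext(side = 1, text = "Startdate", line = 4, cex = cex)
+   mtext(side = 2, text = "Lead time", line = 2.5, cex = cex)
+   axis(1, at = 1:12, las = 2, labels = months, cex.axis = cex)
+   axis(2, at = 0:6, las = 2, labels = 0:6, cex.axis = cex)
+   for(p in 1:12) for(l in 0:6)
+     polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5),
+             col = cols[p, 1+l])
+ }
+ # e.g.: panel_startdate(fig = c(0, 0.45, 0.6, 0.9), fig.title = c(0, 0.45, 0.85, 0.98),
+ #                       label = "NAO+", cols = array.trans.sim2.colors[,,1],
+ #                       months = my.month.short)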
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from NAO- regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
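+ # A matching sketch for the target-month panels (axes swapped, lead time drawn
+ # in decreasing order, and the colors re-indexed to the start month as in the
+ # note near the top of this section; hypothetical helper, not called below):
+ panel_target_month <- function(fig, fig.title, label, cols, months) {
+   par(mar = c(0, 0, 4, 0), fig = fig.title, new = TRUE)
+   mtext(label, cex = 1.2)
+   par(mar = c(1, 4, 1, 0), fig = fig, new = TRUE)
+   plot(1, 1, type = "n", xaxt = "n", yaxt = "n",
+        ylim = c(1, 12.04), xlim = c(-0.23, 6.2), ann = FALSE)
+   mtext(side = 1, text = "Lead time", line = 2, cex = 1)
+   mtext(side = 2, text = "Target month", line = 2.8, cex = 1)
+   axis(1, at = 0:6, las = 1, labels = 6:0, cex.axis = 1, mgp = c(3, 0.7, 0))
+   axis(2, at = 1:12, las = 2, labels = months, cex.axis = 1)
+   for(i in 1:12) for(j in 0:6) {
+     i2 <- ((i - (6-j) - 1) %% 12) + 1   # start month for target month i, lead 6-j
+     polygon(c(j-0.5, j+0.5, j+0.5, j-0.5), c(i-0.5, i-0.5, i+0.5, i+0.5),
+             col = cols[i2, 1+6-j])
+   }
+ }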
+
+
+ # Simulated transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from blocking regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from blocking regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Simulated transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from Atlantic ridge regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
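+ # The simulated/observed blocks in this section differ only in the input array,
+ # the file suffix and the title, so the whole set could be table-driven. A
+ # sketch, under the assumption (consistent with the section comments) that the
+ # suffix 1..4 of the sim/obs array names is the source regime NAO+, NAO-,
+ # blocking, Atlantic ridge, while the fourth dimension is the destination regime:
+ regime.suffix <- c("NAO+", "NAO-", "blocking", "Atlantic_ridge")
+ regime.title  <- c("NAO+", "NAO-", "blocking", "Atlantic ridge")
+ # for(r in 1:4){
+ #   arr.sim  <- get(paste0("array.trans.sim", r))  # 12 startdates x 7 leads x 4 destinations
+ #   cols.sim <- values2cols(arr.sim, my.seq, trans.cols)  # helper sketched earlier
+ #   # ... one generic block calling panel_startdate()/panel_target_month() ...
+ # }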
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from Atlantic ridge regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO+ regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
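+ # From here on the same color-binning and plotting machinery is reused for the
+ # bias of the transition probabilities (the array.trans.diffN arrays; presumably
+ # the simulated minus the observed probabilities, in percentage points, computed
+ # earlier in the script). Only my.seq changes, to the diverging scale
+ # c(-20,-10,-5,-2,0,2,5,10,20): with the strict "<" ladder above, a bias of
+ # exactly 0 falls in the first positive bin [0,2), and since only the inner
+ # breaks are binned, any value below -10 or at/above +10 ends up in the two
+ # open-ended outer bins, whose colorbar ends are labelled -20 and +20.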
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO- regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from blocking regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]),
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + diff --git a/old/weather_regimes_maps_v23.R~ b/old/weather_regimes_maps_v23.R~ new file mode 100644 index 0000000000000000000000000000000000000000..ed842f6d03d4fa701cfe57a347af89ed2e26d47c --- /dev/null +++ b/old/weather_regimes_maps_v23.R~ @@ -0,0 +1,4932 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, to create a .pdf file (see below) +# that the user must open to find visually the order by which the four regimes are stored inside the four clusters.
For example, they can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, they have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, to employ it to automatically associate with each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- forecast.name + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 1:12 # You need to have created beforehand the '_psl.RData' file with the output of 'weather_regimes_vXX'.R for that period. + +# run it twice: once, with: 'ordering <- FALSE', 'save.names <- TRUE', 'as.pdf <- TRUE' and 'composition <- "simple" ' +# to look at the cartography and find visually the right regime names of the four clusters; then, insert the regime names in the correct order below and run this script +# a second time one month/season at a time with: 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple" ' +# to save the ordered cartography (then, you can check the correct regime sequence in the .png maps). After that, any time you run this script, remember to set: +# 'ordering = TRUE , 'save.names = FALSE' (and any type of composition you want to plot) not to overwrite the files which store the right cluster order.
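+
+# For clarity, the two passes described above amount to the following settings (an
+# illustrative sketch only, never executed; the actual values are assigned just below):
+if(FALSE){
+  # 1st pass: find visually which regime each cluster contains:
+  ordering <- FALSE; save.names <- TRUE; as.pdf <- TRUE; composition <- "simple"
+  # 2nd pass, after filling in the clusterX.name variables below:
+  ordering <- TRUE; save.names <- TRUE; as.pdf <- FALSE; composition <- "simple"
+}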
+ + ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). + save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + + composition <- "summary" # choose which kind of composition you want to plot: + # - 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # - 'psl' for all the regime anomalies for a fixed forecast month + # - 'fre' for all the interannual frequencies for a fixed forecast month + # - 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # - 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files] + # - 'impact.highest' for the impact plots of the regime with the highest impact + # - 'single.impact' to save the individual impact maps + # - 'single.psl' to save the individual psl map + # - 'single.fre' to save the individual fre map + # - 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # - 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + + var.num <- 1 # if fields.name ='rean.name' and composition ='simple', choose a variable for the impact maps (1: sfcWind, 2: tas) + + mean.freq <- FALSE # if TRUE, when composition <- "simple", it also adds the mean frequency value over each frequency plot + + # Associate the regimes to the four clusters: + cluster3.name <- "NAO+" + cluster2.name <- "NAO-" + cluster1.name <- "Blocking" + cluster4.name <- "Atl.Ridge" + + # choose an order to give to the cartography, from top to bottom: + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + ####### if fields.name='forecast.name', you have to specify these additional variables: + + # working dir with the input files with '_psl.RData' suffix: + work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + + lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a } at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
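+
+# A worked example of the start-month arithmetic applied inside the loop below
+# (illustrative only; these hypothetical '.ex' variables are not used by the script):
+# with forecast.month = 1 (January) and lead.month = 3, the start month is October,
+# since period = 1 - 3 = -2 and -2 + 12 = 10:
+if(FALSE){
+  forecast.month.ex <- 1; lead.month.ex <- 3  # hypothetical example values
+  period.ex <- forecast.month.ex - lead.month.ex
+  if(period.ex < 1) period.ex <- period.ex + 12
+  period.ex  # 10: October start dates forecast January at lead month 3
+}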
+for(forecast.month in 1:12){ +####### Derived variables ############################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! 
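+
+# The loop below counts regime transitions day by day and, further down, the mean
+# persistence of each cluster. As a compact reference, both statistics can be
+# sketched with base R on a toy daily sequence (a hedged equivalent only: the
+# script's own loops handle members, years and the cluster ordering explicitly;
+# 'daily.cluster' is a hypothetical vector of daily cluster numbers, not a
+# variable of this script):
+if(FALSE){
+  daily.cluster <- c(1,1,2,2,2,4,1,3,3,4)
+  # transition probabilities: consecutive days with a regime change, row-normalized
+  # (self-transitions are never counted, as in trans.sim/trans.obs below):
+  from <- head(daily.cluster, -1); to <- tail(daily.cluster, -1)
+  chg <- from != to
+  trans <- table(factor(from[chg], levels=1:4), factor(to[chg], levels=1:4))
+  trans <- trans / rowSums(trans)
+  # mean persistence of cluster k = mean length of its uninterrupted spells:
+  spells <- rle(daily.cluster)
+  pers <- sapply(1:4, function(k) mean(spells$lengths[spells$values == k]))
+}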
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple"){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + impact.data <- FALSE + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month') + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available; if not, it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for reanalysis") + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print("Impact data for reanalysis not available") + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]]
== 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1
+ } + if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs2[j2o] <- my.cluster$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs3[j3o] <- my.cluster$cluster[d + 1] + j3o <- j3o + 1 + } + if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs4[j4o] <- my.cluster$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order than the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 
<- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month
== 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # 
restore the right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) +
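# note: row N of trans.sim holds the transition probabilities from original cluster N to the four clusters; indexing its columns with 'ordered.sequence' permutes them so that each saved transSimX vector follows the regime order of the 'orden' vector. + # A minimal sketch of the permutation, assuming for example clusters.all == c(2,4,1,3): + # ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) # gives c(3,1,4,2) + # unname(trans.sim[1, ordered.sequence]) # row 1 rearranged into 'orden' order +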
assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <-
colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, 
intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) +
mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ # it will never enter here, because we are already inside an if with composition="simple" + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) +
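# barplot.freq() and barplot.freq.sim2() are helper functions sourced from Rfunctions.R (not shown here); the .sim2 variant is assumed to also draw the ensemble spread (the freMax/freMin band) and to overlay the observed frequency series, which is why it is only called when fields.name == forecast.name: +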
if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
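+ # note on the remark above: plotmath expression() objects cannot evaluate a paste0() call, so the mean-frequency labels further below are built with bquote(), whose .() operator splices an already-evaluated value into the plotmath expression. + # Minimal sketch with a made-up value of 42: + # mtext(bquote(bar(nu) == .(paste0(42, "%")))) # draws nu with an overbar, followed by "= 42%"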
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, 
sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + # Visualize the composition with the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var,
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the impact maps for a selected forecasted month, one file per regime: + if(composition == "single.impact"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + dev.off() + } + + # Visualize the psl pattern maps, one file per cluster: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + +
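# the WithinSS value in each toptitle is that cluster's within-cluster sum of squares (the withinssX objects are assumed to come from the k-means output loaded earlier), divided by 10^6 only to keep the title short: +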
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, 
map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + } # close if on composition + + 
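# each (startdate, lead month) iteration above has saved a ClusterNames .RData bundle in work.dir; the composition == "summary" branch further below reloads those bundles to fill its [startdate, leadmonth, regime] arrays. +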
print("Finished!") +} # close if on composition != "summary" + +} + +if(composition == "taylor"){ + library("plotrix") + + # choose a reference season from 13 (Winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # override the previous values because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
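+ # hard-coded mapping: the four .ref indices below place each saved centroid (pslwr1mean..pslwr4mean) into its slot of the 'orden' regime order by hand, instead of trusting the cluster names stored with dataset num. 46.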
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (e.g. with running clusters) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_12_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # override the previous values because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regime: + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon,
lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + # Plot the Taylor diagrams: + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",fields.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + +
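# reminder of the assign()/paste0() idiom used throughout this block: the target variable name is built at run time, so the centroids (always stored on disk as pslwr1mean..pslwr4mean) land in the mapN slot matching their regime in 'orden'. + # Minimal sketch, assuming cluster1 == 2: + # assign(paste0("map", cluster1), pslwr1mean) # equivalent to map2 <- pslwr1mean +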
assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { corr.first[2,p] <-
cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking +
array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ (sim/obs sd ratio, despite the "diff" name) + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- (sim/obs sd ratio) + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking (sim/obs sd ratio) + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge (sim/obs sd ratio) + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + +
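# A minimal sketch (not part of the original script) of how these transition blocks could be + # filled through a single helper, assuming the convention used above: triangle 1 = NAO+ (left), + # 2 = Atl.Ridge (bottom), 3 = NAO- (right), 4 = Blocking (top), with NA on the self-transition; + # it shrinks the copy-paste surface that makes the comments in these blocks easy to get wrong: + fill.trans <- function(arr, p, l, from, trans) { + triangle <- c(1, 3, 4, 2) # regime number (NAO+, NAO-, Blocking, Atl.Ridge) -> triangle position + for(to in 1:4) arr[p, 1+l, triangle[to]] <- if(to == from) NA else 100*trans[to] + arr # return the updated array + } + # e.g.: array.trans.sim2 <- fill.trans(array.trans.sim2, p, l, 2, transSim2) + +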
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+ + array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking + array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge + + array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+ + array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO- + array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge + + array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+ + array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO- + array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking + array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Spatial Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400) + + # create an array similar to array.spat.cor but with colors instead of corr. values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7] + array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6] + array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5] + array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4] + array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3] + array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2] + array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), 
las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), 
fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Bias freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
+
+# Bias persistence summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400)
+
+# create an array similar to array.pers but with colors instead of frequencies:
+diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA)
+
+array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4))
+array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7]
+array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6]
+array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5]
+array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4]
+array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3]
+array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2]
+array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1,0.5+p-1,1+p-1), c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1])   # left triangle: NAO+
+    polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2])  # bottom triangle: Atlantic Ridge
+    polygon(c(1+p-1,1.5+p-1,1.5+p-1), c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3])     # right triangle: NAO-
+    polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4])    # top triangle: Blocking
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE)
+mtext(side = 1, text = "days/month", line = 2, cex=1)
+dev.off()
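+
+# The chains of "< my.seq[k]" assignments above and below implement a manual
+# binning of values into 8 colour classes. A sketch of the same mapping with
+# base R's findInterval() (bin.colors is a hypothetical helper; its NA -> white
+# rule matches the transition blocks further down, which paint missing cells white):
+bin.colors <- function(x, brks, cols, na.col="white"){
+  # interior breaks are brks[2:(length(brks)-1)]; values below the first interior
+  # break get cols[1], values at or above the last one keep cols[length(cols)]
+  out <- cols[findInterval(x, brks[2:(length(brks)-1)]) + 1]
+  out[is.na(x)] <- na.col
+  array(out, dim(x))
+}
+# e.g. the block above is equivalent (apart from NA cells) to:
+# array.diff.pers.colors <- bin.colors(array.diff.pers, my.seq, diff.pers.cols)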
+
+# St.Dev.freq ratio summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+# create an array similar to array.cor but with colors instead of corr.values:
+diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+
+array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
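+
+# Each cell of the one-page summaries packs the four regimes into one square as
+# four triangles (regime order as commented in the bias block above). The same
+# geometry as a reusable sketch (draw.regime.cell is a hypothetical helper, not
+# used by the rest of this script):
+draw.regime.cell <- function(p, l, cols4){
+  polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1])  # left: NAO+
+  polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2])  # bottom: Atlantic Ridge
+  polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3])  # right: NAO-
+  polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4])  # top: Blocking
+}
+# e.g.: for(p in 1:12) for(l in 0:6) draw.regime.cell(p, l, array.diff.sd.freq.colors[p,1+l,])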
+
+# Impact summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400)
+
+# create an array similar to array.pers but with colors instead of frequencies:
+imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+array.imp.colors <- array(imp.cols[8],c(12,7,4))
+array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7]
+array.imp.colors[array.imp < my.seq[7]] <- imp.cols[6]
+array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.imp.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.imp.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)  # paste0() needed: mtext()'s second positional argument is 'side'
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
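+
+# The four-panel "startdate" figures repeat an identical par/plot/axis/polygon
+# block once per regime; a sketch of that block factored into a function
+# (startdate.panel is a hypothetical name; it relies on my.month.short defined
+# earlier in the script):
+startdate.panel <- function(col.array, regime, label, fig.label, fig.panel){
+  par(mar=c(0,0,4,0), fig=fig.label, new=TRUE)
+  mtext(label, cex=1.5)
+  par(mar=c(0,4,1,0), fig=fig.panel, new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=1:12, las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=0:6, las=2, labels=0:6, cex.axis=1.5)
+  for(p in 1:12) for(l in 0:6) polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=col.array[p,1+l,regime])
+}
+# e.g.: startdate.panel(array.imp.colors, 1, "NAO+", c(0, 0.45, 0.85, 0.98), c(0, 0.45, 0.6, 0.9))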
+
+# Simulated transition probability from NAO+ to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+# create an array similar to array.cor but with colors instead of corr.values:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+mtext("% Transition from NAO+ regime", cex=1.2)
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+mtext(side = 1, text = "%", line = 2, cex=1)
+
+dev.off()
+
+# Observed transition probability from NAO+ to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400)
+
+# create an array similar to array.cor but with colors instead of corr.values:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2]
+array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1]
+array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Observed transition from NAO+ regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Observed transition from NAO+ regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+mtext("% Observed transition from NAO+ regime", cex=1.2)
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+mtext(side = 1, text = "%", line = 2, cex=1)
+
+dev.off()
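+
+# The six transition blocks (simulated/observed x NAO+/NAO-/blocking) repeat the
+# same binning with identical breaks and colours; assuming the blocking
+# observations follow the same naming (array.trans.obs3), all six colour arrays
+# could be built in one pass with the bin.colors() sketch from above:
+# trans.colors <- lapply(list(sim1=array.trans.sim1, obs1=array.trans.obs1,
+#                             sim2=array.trans.sim2, obs2=array.trans.obs2,
+#                             sim3=array.trans.sim3, obs3=array.trans.obs3),
+#                        bin.colors, brks=my.seq, cols=trans.cols)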
+
+# Simulated transition probability from NAO- to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400)
+
+# create an array similar to array.cor but with colors instead of corr.values:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2]
+array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1]
+array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+mtext(side = 1, text = "%", line = 2, cex=1)
+
+dev.off()
+
+# Observed transition probability from NAO- to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400)
+
+# create an array similar to array.cor but with colors instead of corr.values:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7]
+array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6]
+array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5]
+array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4]
+array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3]
+array.trans.obs2.colors[array.trans.obs2 < my.seq[3]] <- trans.cols[2]
+array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1]
+array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% Observed transition from NAO- regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% Observed transition from NAO- regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+mtext("% Observed transition from NAO- regime", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+mtext("Blocking", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+mtext(side = 1, text = "%", line = 2, cex=1)
+
+dev.off()
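+
+# One whole one-page transition summary, factored with the bin.colors() and
+# draw.regime.cell() sketches above (transition.summary.figure is a hypothetical
+# helper; it relies on work.dir, forecast.name, my.month.short and ColorBar2
+# from earlier in the script):
+transition.summary.figure <- function(arr, title.txt, file.suffix, brks, cols){
+  arr.colors <- bin.colors(arr, brks, cols)  # NA cells become white, as in the blocks above
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_",file.suffix,".png"), width=550, height=400)
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title(title.txt, cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=1:12, las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=0:6, las=2, labels=0:6, cex.axis=1.5)
+  for(p in 1:12) for(l in 0:6) draw.regime.cell(p, l, arr.colors[p,1+l,])
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = brks, cols = cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(brks), my.labels=brks)
+  dev.off()
+}
+# e.g.: transition.summary.figure(array.trans.sim3, "% ECMWF-S4 transition from blocking regime", "blocking", my.seq, trans.cols)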
+
+# Simulated transition probability from blocking to X summary:
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, height=400)
+
+# create an array similar to array.cor but with colors instead of corr.values:
+trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+par(mar=c(5,4,4,4.5))
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+title("% ECMWF-S4 transition from blocking regime", cex=1.5)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+for(p in 1:12){
+  for(l in 0:6){
+    polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+    polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+    polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+  }
+}
+
+par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+mtext("% ECMWF-S4 transition from blocking regime", cex=1.5)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+mtext("NAO+", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+# Blocking:
+par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+mtext("Blocking", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+mtext("NAO-", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+mtext("Atlantic Ridge", cex=1.5)
+par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+dev.off()
+
+png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350)
+
+plot.new()
+par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+mtext("% ECMWF-S4 transition from blocking regime", cex=1.2)
+
+# NAO+ :
+par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+mtext("NAO+", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+# NAO- :
+par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+mtext("NAO-", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+# Atlantic Ridge:
+par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+mtext("Atlantic Ridge", cex=1.2)
+par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+mtext(side = 1, text = "Lead time", line = 2, cex=1)
+mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+axis(2, at=seq(1,12), las=2,
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
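+# In the *_target_month panels, each cell is indexed by target month i and lead time (6-j);
+# i2 = i - (6-j), wrapped back into 1..12 across January, recovers the startdate month whose
+# forecast verifies in month i at that lead time, so the startdate-indexed transition arrays
+# can be reused here. A minimal check of the wrap-around (hypothetical helper, kept inside
+# if(FALSE) so it is never executed):
+if(FALSE){
+  startdate.month <- function(target, lead){ m <- target - lead; ifelse(m > 0, m, m + 12) }
+  stopifnot(startdate.month(2, 4) == 10)  # a February target at lead 4 starts in October
+  stopifnot(startdate.month(8, 3) == 5)   # an August target at lead 3 starts in May
+}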
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + 
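+# Each (startdate p, lead time l) cell of this summary plot is split into four triangles
+# (left, bottom, right, top), one per destination regime, so a single cell shows all four
+# transition probabilities at once; the fourth array index selects the destination regime.
+# A sketch of the same geometry as a reusable helper (assumed names, never executed):
+if(FALSE){
+  draw.cell <- function(p, l, cols4){  # cols4: colours for the left/bottom/right/top triangles
+    polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1])  # left triangle
+    polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2])  # bottom triangle
+    polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3])  # right triangle
+    polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4])  # top triangle
+  }
+  # e.g. draw.cell(p, l, array.trans.obs4.colors[p, 1+l, ]) reproduces the loop body below
+}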
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
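+# The discrete colour classes drawn by ColorBar2 below come from the cascading
+# "x[x < my.seq[k]] <- trans.cols[k-1]" assignments repeated throughout this script;
+# the same binning can be expressed in a single findInterval() call. A hedged sketch
+# (illustrative only; the explicit assignments used above are left untouched):
+if(FALSE){
+  prob.to.col <- function(x, brks, cols){
+    out <- array(cols[findInterval(x, brks[2:(length(brks)-1)]) + 1], dim=dim(x))
+    out[is.na(x)] <- "white"  # NA probabilities are drawn in white, as above
+    out
+  }
+  # e.g. prob.to.col(array.trans.obs4, my.seq, trans.cols) should match array.trans.obs4.colors
+}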
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
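+# Every target-month panel in these bias figures repeats the same plot()/axis()/mtext()/
+# polygon() boilerplate; a sketch of how a single helper could replace each block
+# (assumed argument names, kept inside if(FALSE) so the current figure is not affected):
+if(FALSE){
+  draw.target.panel <- function(colors.slice, months=my.month.short){
+    plot(1, 1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=FALSE)
+    mtext(side=1, text="Lead time", line=2, cex=1)
+    mtext(side=2, text="Target month", line=2.8, cex=1)
+    axis(1, at=0:6, las=1, labels=6:0, mgp=c(3,0.7,0))
+    axis(2, at=1:12, las=2, labels=months)
+    for(i in 1:12) for(j in 0:6){
+      i2 <- ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12)  # startdate month of this cell
+      polygon(c(j-0.5, j+0.5, j+0.5, j-0.5), c(i-0.5, i-0.5, i+0.5, i+0.5), col=colors.slice[i2, 1+6-j])
+    }
+  }
+  # e.g. draw.target.panel(array.trans.diff2.colors[,,1]) would redraw the NAO+ panel of this figure
+}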
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1: sfcWind, 2: tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + }
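+ + # An equivalent, vectorized form of the absolute-argmax loop above (an illustrative sketch in base R, not used by this script; note that ties go to the first cluster here, while the loop above keeps the last one): + ## imp.stack <- array(c(imp1, imp2, imp3, imp4), dim=c(dim(imp1), 4)) # stack the four impact maps along a third dimension + ## imp.all <- apply(abs(imp.stack), c(1,2), which.max) + 0.5 # yields 1.5..4.5 exactly as the loop above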
+ + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + diff --git a/old/weather_regimes_maps_v24.R b/old/weather_regimes_maps_v24.R new file mode 100644 index 0000000000000000000000000000000000000000..8832e4917ade7eb861dcad81c383b07573393be8 --- /dev/null +++ b/old/weather_regimes_maps_v24.R @@ -0,0 +1,5552 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: +# first, only one month/season at a time, with 'period=X' (X=1 ..
12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple"', +# inserting the regime names 'clusterX.name <- ...' (X = 1...4) in the correct order; in this first run, you only save the ordered cartography. +# You already have to know which is the right regime order, by taking a look at the output maps (_clusterX.png) of weather_regimes.R +# After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order. +# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean" +# +# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to: +# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate to each simulated cluster one of the +# observed regimes (the one with the highest spatial correlation). Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" + +rean.name <- "NCEP" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- rean.name #forecast.name + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
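+# A minimal sketch of the two-pass workflow described in the header above (illustrative values only, not definitive settings): +# pass 1 (one period at a time, after checking the _clusterX.png maps) writes the ClusterNames .RData files: +# ordering <- TRUE; save.names <- TRUE; as.pdf <- FALSE; composition <- "simple" +# pass 2 (any composition) only reads the saved names back, without overwriting them: +# ordering <- TRUE; save.names <- FALSE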
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "psl.rean" # choose which kind of composition you want to plot: + # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # 'psl' for all the regime anomalies for a fixed forecast month + # 'fre' for all the interannual frequencies for a fixed forecast month + # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files] + # 'impact.highest' for the impact plots of the regime with the highest impact + # 'single.impact' to save the four impact maps in a 2x2 composition + # 'single.psl' to save the individual psl map + # 'single.fre' to save the individual fre map + # 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps + # 'psl.rean' for all the regime anomalies for all months of a reanalysis + +var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- TRUE # if TRUE, when composition is 'simple' or 'edpr', it also adds the mean frequency value over each frequency plot + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 11 #1:12 # You need to have created beforehand the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period. + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster4.name <- "NAO-" +cluster2.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below +year.chosen <- 2016 +month.chosen <- 1:12 + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a closing } at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
+#for(forecast.month in 1:12){ + +####### Derived variables ############################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... + + if(composition == "psl.rean") { + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! 
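+# Toy, self-contained sketch (hypothetical daily series, not part of the workflow) of two diagnostics computed inside the loop below: the row-normalized transition matrix and the mean persistence (spell length) of each cluster: +## toy.cluster <- sample(1:4, 100, replace=TRUE) # hypothetical daily cluster series +## changed <- toy.cluster[-length(toy.cluster)] != toy.cluster[-1] # days followed by a regime change +## trans.toy <- table(from=toy.cluster[-length(toy.cluster)][changed], to=toy.cluster[-1][changed]) +## trans.toy <- trans.toy / rowSums(trans.toy) # same normalization as trans.sim[i,]/trans.tot[i] below +## spells <- rle(toy.cluster) # run lengths of consecutive equal clusters +## pers.toy <- tapply(spells$lengths, spells$values, mean) # mean spell length per cluster, as pers1..pers4 below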
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple" || composition == 'edpr'){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + impact.data <- FALSE + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month') + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + # load impact data only if it is available; if not, it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for reanalysis") + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print("Impact data for reanalysis not available") + } + + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <-
apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # also load the reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1
+ } + if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs2[j2o] <- my.cluster$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs3[j3o] <- my.cluster$cluster[d + 1] + j3o <- j3o + 1 + } + if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs4[j4o] <- my.cluster$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 
<- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month
== 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, since they may have changed after the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + }
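+ + # Note: all the spat.cor* and cor.imp* values above are Pearson pattern correlations of two maps unrolled into vectors; a small helper like this (an illustrative sketch, not defined in this script) would factor out the repetition: + ## pattern.cor <- function(map.a, map.b) cor(as.vector(map.a), as.vector(map.b)) + ## # e.g.: spat.cor1 <- pattern.cor(clusters.sim[[which(order.sim == 1)]], clusters.obs[[which(order.obs == 1)]])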
+ + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close for on 'forecast.name'
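+ + # Compact sketch (illustrative only) of the greedy cluster-regime association performed above with max1..max4: repeatedly take the largest remaining correlation and mask out its row and column: + ## cc <- cluster.corr; assoc <- rep(NA, 4) + ## for(k in 1:4){ + ##   m <- which(cc == max(cc), arr.ind=TRUE)[1,] # position of the highest remaining correlation + ##   assoc[m[1]] <- m[2] # simulated cluster m[1] -> observed regime m[2] + ##   cc[m[1],] <- -999 # discard this row ... + ##   cc[,m[2]] <- -999 # ... and this column, as done above + ## }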
+ + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + 
my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the three graphs for each of the four regimes: their pressure fields, their impact on var and their frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2,
contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ",
psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name 
== rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Titles of the frequency plots: + title3.xpos <- 0.75 + title3.width <- 0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression!
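+ # (Aside on the note above: a hedged sketch, in case a plotmath title is ever needed.
+ # bquote() can splice the regime name into an expression at evaluation time, e.g.
+ #   mtext(bquote(bold(.(paste0(regime1.name, " Frequency")))), cex=4)
+ # would draw the same bold title via plotmath instead of font=2.)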
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.69, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add the correlation between the observed time series and the ensemble-mean time series on the frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 PNGs + + } # close if on: composition == 'simple' + + + if(composition == "edpr"){ + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + # Sheet title (it will be inserted later, when converting the image for the catalogue): + #par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + #if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + #mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, 
cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # 'year' axis labels under the frequency plots: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name," ", psl.name, " anomaly"), font=2, 
cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, 
cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Titles of the frequency plots: + title3.xpos <- 0.75 + title3.width <- 0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression!
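+ # (Note on the frequency plots above: barplot.freq() draws a single frequency series,
+ # while barplot.freq.sim2() also takes the ensemble max/min envelope and the observed
+ # series, hence its four frequency arguments; both are custom plotting functions,
+ # presumably defined in the sourced Rfunctions.R.)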
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + 
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + #par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + #if(n.map < 8) { + # mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + #} else{ + # 
mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + #} + + } # close if on composition == 'psl.rean' + + # Visualize the composition of the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, 
axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3) + par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE) + mtext("m/s",cex=3) + + dev.off() + + # format it manually from the bash shell (you must run it from your workstation until the 'identify' command of ImageMagick is available on the cluster): + #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png + + + ### to plot the impact map of all regimes on a particular month: + #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100 + year.test <- 2016 + pos.year.test <- year.test - year.start + 1 + imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test] + #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032) + par(fig=c(0, 1, 0.05, 1), new=TRUE) + PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + # vector with the frequency of the WRs in the chosen month and year: + wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test]) + + ## # or save them as individual maps: + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## 
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + } + + # Save the psl pattern of each cluster as an individual map: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, 
year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) 
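+ # (label-band arithmetic: each band set above is 0.1*map.height = 0.019 tall; this
+ # bottom one spans y = 0.08 to 0.099, so every nu-bar annotation sits at the foot of
+ # its barplot row)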
+ mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add the correlation between the observed time series and the ensemble-mean time series on the frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple' || composition == 'edpr' ) && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + 
par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + + } # close if on composition + + print("Finished!") +} # close if on composition != "summary" + + + +#} # close for on forecasts.month + + + +if(composition == "taylor"){ + library("plotrix") + + fields.name="ERA-Interim" + + # choose a reference season from 13 (Winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the Blocking and Atlantic Ridge regimes were saved swapped! 
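+ # (Hedged sketch, not run: instead of hardcoding the order, each saved centroid could
+ # be matched to the reference regime it correlates best with; 'ref.maps' is a
+ # hypothetical list holding the four reference patterns in the order of 'orden':
+ #   saved <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+ #   best <- sapply(saved, function(m) which.max(sapply(ref.maps, function(r) cor(as.vector(m), as.vector(r)))))
+ # 'best[k]' would then give clusterK.ref without relying on how dataset num.46 was saved.)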
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e., with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + rean2.name <- "ERA-Interim" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the Blocking and Atlantic Ridge regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regime: + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, 
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), 
pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" # "black" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod <- TRUE + #add.mod <- ifelse(p == 1, FALSE, TRUE) + + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { 
corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # 
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
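+ # Note (added for clarity): transSimX[k] and transObsX[k] hold the simulated and observed
+ # probabilities of a transition from regime X to regime k (with k: 1=NAO+, 2=NAO-, 3=Blocking,
+ # 4=Atl.Ridge); the X -> X entries are set to NA below because regime persistence is already
+ # summarized separately through persist1..persist4 and persistObs1..persistObs4.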
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to Blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from Blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from Blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from Blocking to Blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from Blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.Ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.Ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.Ridge to Blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to Blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to Blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from Blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from Blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from Blocking to Blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from Blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.Ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.Ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.Ridge to Blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.spat.cor but with colors instead of corr. values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
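+ # (reminder: panel order on these pages is NAO+, NAO-, Blocking, Atlantic Ridge, while the
+ #  regime slots in the colour arrays are 1=NAO+, 2=Atl.Ridge, 3=NAO-, 4=Blocking, so this
+ #  NAO- panel reads slot 3 of array.freq.colors)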
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequency biases:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ # [the code drawing the bias-frequency triangle map, the corresponding *_startdate figure and
+ #  the header and NAO+ panel setup of the *_target_month figure was lost from this copy of the
+ #  script; it follows the same template as the frequency summary above, with array.diff.freq.colors]
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "days/month", line = 2, cex=1)
+ dev.off()
+
+
+ # St.Dev.freq ratio summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.sd.freq but with colors instead of ratio values:
+ diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+
+ array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # left triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) # bottom triangle
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) # right triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) # top triangle
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("S4 spatial correlation of impact on ",var.name[var.num], cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}} + + 
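+ # NOTE: the 4th dimension of the colour arrays indexes the regimes; from the panel
+ # labels the slice order appears to be 1 = NAO+, 2 = Atlantic Ridge, 3 = NAO-, 4 = Blocking.
+ # A named lookup would make the hard-coded slice numbers self-documenting; a sketch
+ # (regime.slice is not defined anywhere else in this script):
+ #
+ #   regime.slice <- c("NAO+" = 1, "Atlantic Ridge" = 2, "NAO-" = 3, "Blocking" = 4)
+ #   # e.g.: col = array.imp.colors[p, 1+l, regime.slice["Atlantic Ridge"]]
+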
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Simulated Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.trans.sim1 but with colors instead of transition probabilities:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # left triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) # bottom triangle
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) # right triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) # top triangle
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO+ regime (January to December, 1994-2013)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l,
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
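+ # (i2 below again maps target month i and lead time 6-j back to the startdate month,
+ # wrapping into 1..12, as in the persistence target-month panels)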
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
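+ # Atlantic Ridge panel (regime slice 2); cells left white are the startdate/lead
+ # combinations where array.trans.obs2 is NA, i.e. no transition estimate is available: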
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
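+ # Aside (an added sketch, not part of the original script): in all of these
+ # "target month" panels the index i2 = ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12)
+ # maps a target month i reached after a lead time of 6-j months back to its
+ # startdate month, wrapping around the year; the same remapping can be written
+ # with modular arithmetic:
+ startdate.month <- function(target.month, lead.time) ((target.month - lead.time - 1) %% 12) + 1
+ # e.g. startdate.month(2, 6) returns 8: a 6-month lead targeting February starts in August.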
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from Atlantic ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){ +
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
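+ # Note (an added sketch, not part of the original script): each cell of these
+ # one-panel summaries is drawn with four polygon() calls that split the unit
+ # square centred on (p, l) into left, bottom, right and top triangles, one per
+ # target regime; a small helper would draw a whole cell in one call:
+ draw.cell <- function(p, l, cols) {
+   polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols[1])  # left triangle
+   polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols[2])  # bottom triangle
+   polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols[3])  # right triangle
+   polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols[4])  # top triangle
+ }
+ # e.g.: for(p in 1:12) for(l in 0:6) draw.cell(p, l, array.trans.obs4.colors[p,1+l,])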
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
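+ # Note (an added refactoring sketch, not the original code): the eight cascaded
+ # "<" assignments used above to turn each array of percentages into colors are
+ # equivalent to binning the values over the inner breaks of my.seq:
+ bin.colors <- function(x, brks, cols, na.col="white") {
+   # findInterval() counts the inner breaks at or below each value, so adding 1
+   # gives the color index; the two outer breaks only label the color bar.
+   out <- array(cols[findInterval(x, brks[2:(length(brks)-1)]) + 1], dim=dim(x))
+   out[is.na(x)] <- na.col
+   out
+ }
+ # e.g.: array.trans.diff2.colors <- bin.colors(array.trans.diff2, my.seq, trans.cols)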
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1: sfcWind, 2: tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + + + + + + + + + + + + + +if(monthly_anomalies) { + # Compute climatology and anomalies (with LOESS filter) for sfcWind data, and draw monthly anomalies, in case you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis: + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March to December period, removing the 29th of February + } + + rm(sfcWind366) + gc() + + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(sfcWindClimDaily) + gc() + + sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc()
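+ # [editor's illustration] The LOESS filtering above fits a smooth annual cycle through the 365
+ # day-of-year climatological means before anomalies are computed. A minimal self-contained sketch
+ # of the same idea on a synthetic single-point series (toy data, same span=0.35 as the script;
+ # wrapped in if(FALSE) so it is never executed):
+ if(FALSE){
+  clim.noisy <- 5 + 2*sin(2*pi*(1:365)/365) + rnorm(365, sd=0.5) # noisy daily climatology at one grid point
+  fit.toy <- loess(clim ~ day, data.frame(clim=clim.noisy, day=1:365), span=0.35)
+  clim.smooth <- predict(fit.toy)     # smooth annual cycle
+  anom.toy <- clim.noisy - clim.smooth # daily anomalies relative to the smooth cycle
+ }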
+ + # same as above, but computing climatology and anomalies (with LOESS filter) for psl data, and drawing monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the March to December period, removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP from Pa to hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + my.brks.var <- seq(-3,3,0.5) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(seq(-21,-1,2),0,seq(1,21,2)) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for debugging + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2000,height=1000) + + par(fig=c(0, 0.5, 0.08, 0.98), new=TRUE) + PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.5, 0.015, 0.09), new=TRUE) + ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + + par(fig=c(0.47, 0.5, 0, 0.028), new=TRUE) + mtext("m/s", cex=1.8) + + par(fig=c(0.5, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean[,EU]), contours.lty="F1FF1F", cex.lab=1) + + # you could almost use the normal PlotEquiMap function below; it only needs to set the black line for anomaly=0 and
decrease the size of the contour labels: + # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5) + + ##my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.5, 1, 0, 0.09), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2, cols=my.cols.var2, vert=FALSE, triangle_ends=c(FALSE,FALSE), label_scale=1.8, subsampleg=2) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.97, 1, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(p,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load regime names: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + 
assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) + + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + fileoutput.test <- paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1000,height=2000) + + par(fig=c(0, 0.5, 0.77, 0.97), new=TRUE) + PlotEquiMap2(rescale(imp.test1[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.54, 0.74), new=TRUE) + PlotEquiMap2(rescale(imp.test2[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.31, 0.51), new=TRUE) + PlotEquiMap2(rescale(imp.test3[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.08, 0.28), new=TRUE) + PlotEquiMap2(rescale(imp.test4[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + par(fig=c(0,0.5,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.5, 0.015, 0.06), new=TRUE) + ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + + par(fig=c(0.48, 0.5, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.5, 1, 0.77, 0.97), new=TRUE) + PlotEquiMap2(rescale(psl.test1[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.54, 0.74), new=TRUE) + 
PlotEquiMap2(rescale(psl.test2[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.31, 0.51), new=TRUE) + PlotEquiMap2(rescale(psl.test3[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.08, 0.28), new=TRUE) + PlotEquiMap2(rescale(psl.test4[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4[,EU]), contours.lty="F1FF1F", cex.lab=1) + + par(fig=c(0.5,1,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.5,1,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.5,1,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.5,1,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.5, 0.99, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2, cols=my.cols.var2, vert=FALSE, triangle_ends=c(FALSE,FALSE), label_scale=1.8, subsampleg=2) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.971, 0.997, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2.test <- paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.test," ", fileoutput2.test)) + + + } # close for on month.test + } # close for on year.test + + +} # close if on monthly_anomalies + + + + + + + + + + diff --git a/old/weather_regimes_maps_v24.R~ b/old/weather_regimes_maps_v24.R~ new file mode 100644 index 0000000000000000000000000000000000000000..ae7dc6e26a9a7050886c891ef62de99ac29f4d06 --- /dev/null +++ b/old/weather_regimes_maps_v24.R~ @@ -0,0 +1,5250 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: +# first, only one month/season at a time with: 'period=X' (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple"', +# and inserting the regime names 'cluster.name1=...' in the correct order; in this first run, you only save the ordered cartography. +# You must already know which is the right regime order, by taking a look at the output maps (_clusterX.png) of weather_regimes.R +# After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order. +# +# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to: +# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
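+# [editor's illustration] A minimal sketch of the association step described above, assuming only a
+# 4x4 matrix of spatial correlations between simulated clusters (rows) and observed regimes (columns):
+# the global maximum is picked repeatedly and its row and column excluded, so each cluster ends up
+# with a distinct regime. Toy matrix and illustrative names; wrapped in if(FALSE) so it never runs:
+if(FALSE){
+ cor.toy <- matrix(runif(16, -1, 1), 4, 4, dimnames=list(paste0("sim",1:4), c("NAO+","NAO-","Blocking","Atl.Ridge")))
+ assoc <- rep(NA_integer_, 4)                     # assoc[i] = observed regime matched to simulated cluster i
+ m <- cor.toy
+ for(k in 1:4){
+  best <- which(m == max(m), arr.ind=TRUE)[1,]    # position of the highest remaining correlation
+  assoc[best[1]] <- best[2]
+  m[best[1],] <- -Inf; m[,best[2]] <- -Inf        # exclude that cluster and that regime
+ }
+ colnames(cor.toy)[assoc]                         # regime name assigned to each simulated cluster
+}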
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" + +rean.name <- "NCEP" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- rean.name #forecast.name + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +composition <- "edpr" # choose which kind of composition you want to plot: + # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # 'psl' for all the regime anomalies for a fixed forecast month + # 'fre' for all the interannual frequencies for a fixed forecast month + # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # 'summary' for the summary plots of all forecast months (persistence bias, freq. bias, correlations, etc.) [You need all ClusterNames files] + # 'impact.highest' for the impact plot of the regime with the highest impact + # 'single.impact' to save the four impact maps in a 2x2 composition + # 'single.psl' to save the individual psl map + # 'single.fre' to save the individual fre map + # 'none' doesn't plot anything, it only saves the ClusterName.Rdata files + # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps + # 'psl.rean' for all the regime anomalies for all months of a reanalysis + +var.num <- 1 # if fields.name ='rean.name' and composition ='simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12 # You need to have created the '_psl.RData' file with the output of 'weather_regimes_vXX.R' for that period beforehand. + +# Associate the regimes to the four clusters: +cluster3.name <- "NAO+" +cluster4.name <- "NAO-" +cluster1.name <- "Blocking" +cluster2.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the loop line below and add a closing } at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ + +####### Derived variables ############################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # when loading _psl files, this value can change!
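+# [editor's illustration] How 'orden' and the clusterX.name variables above drive the plotting
+# order: each cluster index is remapped to the row of 'orden' containing its regime name, so with
+# the names above cluster1 ("Blocking") is drawn third and cluster3 ("NAO+") first. A one-line
+# check of the permutation (safe to run; with the values above it prints 3 4 1 2):
+print(sapply(c(cluster1.name, cluster2.name, cluster3.name, cluster4.name), function(nm) which(orden == nm)))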
+fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } + + if(composition == "psl.rean") { + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(fields.name == rean.name) lead.months <- 1 # just to be able to enter the loop below once! + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for debugging + + if(composition == "simple" || composition == 'edpr'){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + impact.data <- FALSE + for(p in period){ + #p <- period[1] # for debugging + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month') + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore original values of this script + + # load impact data only if it is available; if not, it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for reanalysis") + 
load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print("Impact data for reanalysis not available") + } + + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 + 1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + 
trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1year1member1, day2year1member1, ..., day1year2member1, day2year2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore
original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs2[j2o] <- my.cluster$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs3[j3o] <- my.cluster$cluster[d + 1] + j3o <- j3o + 1 + } + if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs4[j4o] <- my.cluster$cluster[d + 1] + j4o <- j4o + 1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] !=
2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignment due to misassignment in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for debugging + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-";
cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + 
load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore original values of this script + + } # close if on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + 
assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and 
Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," 
(",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, 
map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" 
",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + if(composition == "edpr"){ + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + # Sheet title (it will be inserted later, when converting the image for the catalogue): + #par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + #if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + #mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, 
cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+
+ ## # for the debug:
+ ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+ ## mes <- p
+ ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+ ## #plot(1981:2015,fre.obs,type="l")
+ ## #lines(1981:2015,fre1,type="l",col="red")
+ ## #lines(1981:2015,fre2,type="l",col="blue")
+ ## #lines(1981:2015,fre4,type="l",col="green")
+ ## cor1 <- cor(fre.obs, fre1)
+ ## cor2 <- cor(fre.obs, fre2)
+ ## cor3 <- cor(fre.obs, fre3)
+ ## cor4 <- cor(fre.obs, fre4)
+ ## round(c(cor1,cor2,cor3,cor4),2)
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.30
+ legend1.width <- 0.40
+ legend1.cex <- 2
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Subtitle centroid maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.3
+ title1.width <- 0.44
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+ mtext(paste0(regime1.name," ", psl.name, " anomaly"), font=2,
cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, 
cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
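+ ## (each panel of this sheet is placed by hand with par(fig=c(xmin, xmax, ymin, ymax), new=TRUE),
+ ## which overlays a new plotting region on the same device instead of opening a new page.
+ ## Minimal standalone sketch of the same technique, with dummy data not used by this script:
+ ## plot.new()
+ ## par(fig=c(0, 0.5, 0.5, 1), new=TRUE); plot(rnorm(10), main="top-left panel")
+ ## par(fig=c(0.5, 1, 0.5, 1), new=TRUE); plot(rnorm(10), main="top-right panel")
+ ## par(fig=c(0, 1, 0, 0.5), new=TRUE); plot(rnorm(20), type="l", main="bottom panel"))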
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + 
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + #par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + #if(n.map < 8) { + # mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + #} else{ + # 
mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + #} + + } # close if on composition == 'psl.rean' + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, 
axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3) + par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE) + mtext("m/s",cex=3) + + dev.off() + + # format it manually from the bash shell (you must run it from your workstation until the identity command of ImageMagick will be loaded un the cluster): + #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png + + + ### to plot the impact map of all regimes on a particular month: + #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100 + year.test <- 2016 + pos.year.test <- year.test - year.start +1 + imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test] + #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032) + par(fig=c(0, 1, 0.05, 1), new=TRUE) + PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + # vector with the frequency of the WRs in the chosen month and year: + wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test]) + + ## # or save them as individual maps: + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## 
png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + } + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, 
year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) 
+ mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple' || composition == 'edpr' ) && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + 
par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + + } # close if on composition + + print("Finished!") +} # close if on composition != "summary" + + + +#} # close for on forecasts.month + + + +if(composition == "taylor"){ + library("plotrix") + + fields.name="ERA-Interim" + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster1.ref <- 3 # bypass the previous values, because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+ # set a second WR classification (e.g. one with running clustering) to contrast with the new one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+ rean2.name <- "ERA-Interim"
+
+ # load data of the reference season of the second classification (these lines should be modified to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
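+ # Minimal sketch (toy data) of the assign() remapping applied below: the
+ # cluster-numbered composites pslwr1mean..pslwr4mean are stored under
+ # regime-ordered names, so e.g. map.old.ref1 always holds the same regime
+ # no matter which cluster number it was given in each file:
+ if(FALSE){
+   pslwr1mean.demo <- matrix(rnorm(4), 2, 2) # toy composite of cluster 1
+   cluster1.ref.demo <- 3                    # cluster 1 happens to be regime 3
+   assign(paste0("map.demo", cluster1.ref.demo), pslwr1mean.demo)
+   identical(map.demo3, pslwr1mean.demo)     # TRUE
+ }
+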
+ cluster1.ref <- 3 # bypass the previous values, because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load the pslwrXmean data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
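+ # The composition below is built with base graphics only: each panel is placed
+ # through par(fig=c(xmin,xmax,ymin,ymax), new=TRUE) in normalised device
+ # coordinates. A self-contained sketch of the same layout idiom (toy data,
+ # no PlotEquiMap2 needed):
+ if(FALSE){
+   plot.new()
+   par(fig=c(0.0, 0.5, 0, 1), new=TRUE); plot(rnorm(50), main="left panel")
+   par(fig=c(0.5, 1.0, 0, 1), new=TRUE); plot(rnorm(50), main="right panel")
+ }
+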
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map at the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ # Plot the Taylor diagrams:
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load the pslwrXmean data
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ # load data from the second classification:
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load the pslwrXmean data
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1.old <- which(orden == cluster1.name)
+ cluster2.old <- which(orden == cluster2.name)
+ cluster3.old <- which(orden == cluster3.name)
+ cluster4.old <- which(orden == cluster4.name)
+
+ assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+ assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+ assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+ assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+ add.mod <- ifelse(p == 1, FALSE, TRUE)
+ main.mod <- ifelse(p == 1, orden[regime],"")
+
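+ # my.taylor() below is a local wrapper sourced with the rest of this script;
+ # the standard equivalent in the plotrix package is taylor.diagram(). A hedged,
+ # self-contained sketch of the same idea with synthetic fields:
+ if(FALSE){
+   library(plotrix)
+   ref <- rnorm(100)                       # reference pattern
+   mod1 <- ref + rnorm(100, sd=0.4)        # first classification
+   mod2 <- 0.8*ref + rnorm(100, sd=0.6)    # second classification
+   taylor.diagram(ref, mod1, normalize=TRUE, pos.cor=FALSE, col="red")
+   taylor.diagram(ref, mod2, normalize=TRUE, pos.cor=FALSE, col="blue", add=TRUE)
+ }
+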
+ color.first <- "red"
+ color.second <- "blue" # "black"
+ sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work!
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+
+ # add the points from the second classification:
+ add.mod <- TRUE
+ #add.mod <- ifelse(p == 1, FALSE, TRUE)
+
+ if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+
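+ # cor() below measures the pattern correlation between the two monthly
+ # composites, while RMSE() is assumed to come from the sourced Rfunctions.R;
+ # a plain-R equivalent of that statistic would be:
+ if(FALSE){
+   rmse.demo <- function(x, y) sqrt(mean((x - y)^2, na.rm=TRUE))
+   a <- rnorm(100); b <- a + rnorm(100, sd=0.2)
+   c(cor=cor(a, b), rmse=rmse.demo(a, b))
+ }
+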
+ if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) }
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+ } # close if on composition == "taylor"
+
+ # Summary graphs:
+ if(composition == "summary" && fields.name == forecast.name){
+ # arrays storing the indicators, in the format [startdate, leadmonth, regime]:
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4))
+ array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # 
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
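+ # transSim1..transSim4 / transObs1..transObs4 hold, for each regime, the
+ # probabilities of moving to the four regimes on the next day. A standalone
+ # sketch (toy daily series, not the original implementation) of how such a
+ # transition matrix can be derived:
+ if(FALSE){
+   wt.demo <- sample(1:4, 300, replace=TRUE)                    # toy daily regime sequence
+   trans.demo <- table(factor(wt.demo[-length(wt.demo)], 1:4),  # regime on day t
+                       factor(wt.demo[-1], 1:4))                # regime on day t+1
+   prop.table(trans.demo, margin=1)                             # each row sums to 1
+ }
+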
+ array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor, but with colors instead of correlation values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq, but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Bias of the mean S4 frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Bias of the mean S4 frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Bias of the mean S4 frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}} + + 
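# The colour-lookup blocks in this script all repeat the same cascade of "< my.seq[k]"
# assignments. A minimal helper sketch of that binning with findInterval(); bin.colors
# is an illustrative name, not part of the original script. It reproduces the cascade
# exactly (values on a break go to the upper class, NA cells can be left white):
bin.colors <- function(values, breaks, colors, na.col="white") {
  # interior breaks only: values below breaks[2] get colors[1],
  # values at or above breaks[length(breaks)-1] keep colors[length(colors)]
  idx <- findInterval(values, breaks[2:(length(breaks)-1)]) + 1
  out <- array(colors[idx], dim=dim(values))
  out[is.na(values)] <- na.col  # same white fill used for the transition summaries
  out
}
# e.g.: array.imp.colors <- bin.colors(array.imp, my.seq, imp.cols)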
par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, 
cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Simulated transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
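# The "target month" panels above re-index the (startdate, lead time) arrays: row i is
# the target month and column 1+6-j the lead time, so i2 recovers the startdate month
# with a wrap around January. The same arithmetic as a small standalone sketch
# (startdate.month is an illustrative name, not part of the original script):
startdate.month <- function(target.month, lead.time) {
  m <- target.month - lead.time
  ifelse(m > 0, m, m + 12)  # e.g. target month 2 (Feb) at lead 4 -> startdate month 10 (Oct)
}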
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
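# In the single-panel summaries each (startdate, lead time) cell is split into four
# triangles (left, bottom, right, top), one per regime slice of the 12x7x4 colour
# arrays built above. A compact sketch of that repeated geometry as a function
# (draw.cell.triangles is an illustrative name, not part of the original script):
draw.cell.triangles <- function(p, l, cell.cols) {
  polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=cell.cols[1])   # left triangle
  polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=cell.cols[2])  # bottom
  polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=cell.cols[3])     # right
  polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=cell.cols[4])    # top
}
# e.g.: for(p in 1:12) for(l in 0:6) draw.cell.triangles(p, l, array.trans.obs1.colors[p,1+l,])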
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Simulated transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
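+
+ # NOTE: the eight-step threshold cascades above rebuild the same binning for
+ # every array. A more compact equivalent (a sketch, not part of the original
+ # pipeline) is a single findInterval() call, which returns 0 for values below
+ # my.seq[2] and 7 for values >= my.seq[8], exactly matching the cascade:
+ #
+ #   bin <- findInterval(array.trans.obs2, my.seq[2:8]) + 1            # 1..8
+ #   array.trans.obs2.colors <- array(trans.cols[bin], dim(array.trans.obs2))
+ #   array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white"       # NA cells
+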
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Simulated transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from blocking regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from blocking regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
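+
+ # NOTE on the *_target_month figures below: row i is the target month and
+ # column j the position on the reversed lead-time axis, so the startdate
+ # month is recovered as i2 = i - (6-j), wrapped into 1..12 when the startdate
+ # falls in the previous year. The ifelse() used in those loops is equivalent
+ # to the modular form (a sketch): i2 <- 1 + (i - (6-j) - 1) %% 12
+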
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Simulated transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from Atlantic ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from Atlantic ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
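+   # Each (startdate p, lead time l) cell is split into four triangles, one per
+   # target regime; judging from the panel labels used elsewhere in this script,
+   # the fourth array index runs 1 = NAO+ (left triangle), 2 = Atlantic Ridge
+   # (bottom), 3 = NAO- (right), 4 = Blocking (top).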
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
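+ # NOTE: ColorBar2() is not base R; it is assumed here to be a helper defined
+ # elsewhere in this codebase. The my.ticks / my.labels arguments place the
+ # my.seq break values along the vertical bar (exact placement depends on
+ # that helper's conventions).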
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
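+
+ # NOTE: for these bias summaries my.seq switches to signed breaks around
+ # zero, so (assuming array.trans.diff* holds simulated minus observed
+ # transition probability, in %) cells where ECMWF-S4 underestimates a
+ # transition fall on the blue end of trans.cols and overestimates on the
+ # red end.
+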
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most positively influential, 3: most negatively influential + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]),
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + diff --git a/old/weather_regimes_maps_v25.R b/old/weather_regimes_maps_v25.R new file mode 100644 index 0000000000000000000000000000000000000000..ab7c29b901d7574662fc52152a98a9e810f819bf --- /dev/null +++ b/old/weather_regimes_maps_v25.R @@ -0,0 +1,5793 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: You need to have created beforehand the '_psl.RData' files which are the output of 'weather_regimes.R', for each period you want to visualize.
+# If your regimes derive from a reanalysis, this script must be run twice: +# first, only one month/season at a time with: 'period=X' (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"', +# inserting the regime names 'clusterX.name=...' in the correct order; in this first run, you only save the ordered cartography. +# You already have to know the right regime order by taking a look at the output maps (_clusterX.png) of weather_regimes.R +# After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot) so as not to overwrite the files which store the right cluster order. +# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean" +# +# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to: +# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis is used +# to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
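+#
+# As a minimal illustration of the automatic association described above (the same greedy
+# criterion is implemented inline later in this script): take the 4x4 matrix of spatial
+# correlations between simulated clusters (rows) and observed regimes (columns), pick its
+# maximum, pair that cluster with that regime, blank out its row and column, and repeat.
+# The commented helper below is only a documentation sketch with a hypothetical name, and
+# is not called anywhere in the script:
+#
+# greedy.match <- function(cluster.corr){
+#   assoc <- rep(NA, 4)                            # assoc[i] = observed regime paired with simulated cluster i
+#   cc <- cluster.corr
+#   for(k in 1:4){
+#     m <- which(cc == max(cc), arr.ind=TRUE)[1,]  # position of the current maximum correlation
+#     assoc[m[1]] <- m[2]
+#     cc[m[1],] <- -999                            # exclude this cluster from later pairings
+#     cc[,m[2]] <- -999                            # exclude this regime from later pairings
+#   }
+#   assoc
+# }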
+# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +#library("corrplot") + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days" + +rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis) + +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'): +fields.name <- rean.name #forecast.name + +composition <- "psl.rean.unordered" # choose which kind of composition you want to plot: + # 'none' doesn't plot anything, it only saves the ClusterName.Rdata files for each period selected, overwriting the eventual pre-existing ones + # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # 'psl' for all the regime anomalies for a fixed forecast month + # 'fre' for all the interannual frequencies for a fixed forecast month + # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) 
[You need all ClusterNames files] + # 'impact.highest' for all the impact plots of the regime with the highest impact + # 'single.impact' to save the four impact maps in a 2x2 composition + # 'single.psl' to save the individual psl map + # 'single.fre' to save the individual fre map + # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps + # 'psl.rean' for all the regime anomalies for all months of a reanalysis + # 'psl.rean.unordered': as before, but without ordering clusters + # 'corr.matrix' : as before, but plots the correlation matrix only + +var.num <- 1 # if fields.name ='rean.name' and composition ='simple' or 'edpr', choose a variable for the impact maps 1: sfcWind 2: tas + +mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 1:12 + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster3.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +####### if monthly_anomalies <- TRUE, you have to specify these additional parameters: +monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies for the chosen years and months set below over Europe +year.chosen <- 2016 +month.chosen <- 1:12 +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +rean.data <- JRA55 # choose one of the above reanalyses + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ + ####### Derived variables ############################################################################################################################################### + ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... + + if(composition == "none") { + ordering = TRUE + save.names = TRUE + as.pdf = FALSE + } + + if(composition == "psl.rean") { + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering=FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "corr.matrix") { + ordering=FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + plot.new() + } + + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! 
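+
+  # Hedged documentation sketch (an assumption of this write-up, not part of the original
+  # workflow, and never called): the transition probabilities computed with explicit
+  # day/year/member loops further below boil down to counting how often one daily cluster
+  # label is followed by a different one, then row-normalising the counts.
+  # 'transition.matrix.sketch' is a hypothetical helper name:
+  transition.matrix.sketch <- function(cl){
+    pairs <- cbind(head(cl,-1), tail(cl,-1))             # (day d, day d+1) label pairs
+    pairs <- pairs[pairs[,1] != pairs[,2], , drop=FALSE] # keep only actual regime changes
+    tm <- table(factor(pairs[,1], levels=1:4), factor(pairs[,2], levels=1:4))
+    tm / pmax(rowSums(tm), 1)                            # row-normalised transition probabilities
+  }
+  # e.g. transition.matrix.sketch(c(1,1,2,3,3,4,1)); unlike the loops below, this sketch
+  # does not restart the count at year/member boundaries, so it is illustrative only.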
+ + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple" || composition == 'edpr'){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + impact.data <- FALSE + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month') + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + # load impact data only if it is available, if not it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for reanalysis") + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print("Impact data for reanalysis not available") + } + + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- 
apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ,,, + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrities the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1 
+ } + if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs2[j2o] <- my.cluster$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs3[j3o] <- my.cluster$cluster[d + 1] + j3o <- j3o + 1 + } + if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs4[j4o] <- my.cluster$cluster[d + 1] + j4o <- j4o + 1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 
<- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month 
== 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # 
restore the correct lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # otherwise, load the cluster names from the file already saved, if the regimes come from a reanalysis: + ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData") + if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check that the file exists + load(ClusterName.file) # load cluster names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- 
c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # positions of the longitude values covering Europe only (excluding the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid for both wind speed and temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + 
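+ ## # Illustrative sketch (not part of the original analysis): how the white-centred diverging
+ ## # palette above is built. The breaks are symmetric around zero, the colors come from the
+ ## # RColorBrewer "RdBu" palette, and the two bins adjacent to zero are forced to white so that
+ ## # near-zero anomalies stay uncoloured. Names with the 'example.' prefix are hypothetical:
+ ## example.brks <- c(-300, seq(-200, 200, 10), 300)                 # one color per interval below
+ ## example.cols <- colorRampPalette(rev(brewer.pal(11, "RdBu")))(length(example.brks) - 1)
+ ## mid <- floor(length(example.cols) / 2)                          # index of the bin just below zero
+ ## example.cols[mid] <- example.cols[mid + 1] <- "white"           # blank the two central bins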
#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), 
new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " 
Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + 
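+ ## # Illustrative sketch (not part of the original analysis): the yearly series fre1..fre4 plotted
+ ## # below are fractions of days assigned to each regime in each year. With hypothetical day-wise
+ ## # vectors 'regime.daily' (cluster index of each day) and 'year.daily' (year of each day),
+ ## # a series like fre1 could be derived as:
+ ## regime.daily <- sample(1:4, 3*365, replace=TRUE)              # synthetic daily regime sequence
+ ## year.daily <- rep(1981:1983, each=365)
+ ## fre1.example <- tapply(regime.daily == 1, year.daily, mean)   # fraction of regime-1 days per year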
barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
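+ ## # Illustrative sketch (not part of the original analysis) of the workaround referenced in the
+ ## # comment above: paste0() output cannot be concatenated inside a plotmath expression, but it can
+ ## # be spliced in with bquote() and .(), as done for the mean-frequency labels further below:
+ ## lab <- bquote(bar(nu) == .(paste0(12.3, "%")))   # renders as a nu-bar followed by "= 12.3%"
+ ## # mtext(lab, cex=2)                              # hypothetical usage on an open graphics device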
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + if(composition == "edpr"){ + + # adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)+1) # blue--white--red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)+1) # blue--white--red colors + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", 
continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, 
title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, 
cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
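+ ## # Illustrative sketch (not part of the original analysis) of the par(fig=...) overlay technique
+ ## # used throughout these compositions: each panel is drawn inside a normalised
+ ## # c(xmin, xmax, ymin, ymax) sub-region of the device, with new=TRUE so previous panels are kept:
+ ## plot.new()
+ ## par(fig=c(0.0, 0.5, 0.5, 1.0), new=TRUE); plot(rnorm(10), main="top-left panel")
+ ## par(fig=c(0.5, 1.0, 0.5, 1.0), new=TRUE); plot(rnorm(10), main="top-right panel")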
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + #par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + #if(n.map < 8) { + # mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + #} else{ + # mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + #} + + } # close if on composition == 'psl.rean' + + + if(composition == "psl.rean.unordered"){ + + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + 
PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + + + } # close if on psl.rean.unordered + + + if(composition == "corr.matrix"){ + + # correlation matrix with the DJF regimes: + cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean + + rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" + + load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # Load mean slp DJF data from the same reanalysis + load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also reanalysis DJF regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("psl.ordered",cluster1), pslwr1mean) + assign(paste0("psl.ordered",cluster2), pslwr2mean) + assign(paste0("psl.ordered",cluster3), pslwr3mean) + assign(paste0("psl.ordered",cluster4), pslwr4mean) + + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden)) + cluster.corr[1,1] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered1)) + cluster.corr[1,2] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered2)) + cluster.corr[1,3] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered3)) + cluster.corr[1,4] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered4)) + cluster.corr[2,1] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered1)) + cluster.corr[2,2] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered2)) + cluster.corr[2,3] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered3)) + cluster.corr[2,4] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered4)) + cluster.corr[3,1] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered1)) + cluster.corr[3,2] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered2)) + cluster.corr[3,3] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered3)) + cluster.corr[3,4] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered4)) + cluster.corr[4,1] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered1)) + cluster.corr[4,2] <- cor(as.vector(cluster4.monthly), 
as.vector(psl.ordered2)) + cluster.corr[4,3] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered3)) + cluster.corr[4,4] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered4)) + + #cluster.corr2 <- t(cluster.corr) + + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + print(paste0("map.xpos= ", map.xpos)) + + par(fig=c(0, 1, 0, 1), new=TRUE) + text.cex <- 2 + text.ypos <- 1.03 + text(x=map.xpos - 0.015, y=text.ypos - 0.02, labels="cl1", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.04, labels="cl2", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.06, labels="cl3", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.08, labels="cl4", cex=text.cex) + text(x=map.xpos + 0.000, y=text.ypos + 0.00, labels="NAO+", cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos + 0.00, labels="NAO-", cex=text.cex) + text(x=map.xpos + 0.030, y=text.ypos + 0.00, labels="BLO", cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos + 0.00, labels="ATL", cex=text.cex) + + text(x=map.xpos + 0.00, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,1], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,2], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,3], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,3], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,3], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,3], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,4], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,4], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,4], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,4], cex=text.cex) + + + } # close if on corr.matrix + + + # Visualize the composition of the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + 
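+ ## # Illustrative sketch (not part of the original analysis): the masks sig1..sig4 used in the
+ ## # compositions above are p-value grids loaded from the precomputed impact data; one common way
+ ## # to obtain such a grid is a per-gridpoint t-test of the daily anomalies against zero
+ ## # ('anom' is a hypothetical [day, lat, lon] array with synthetic data):
+ ## anom <- array(rnorm(30*5*5), c(30, 5, 5))
+ ## pval <- apply(anom, c(2, 3), function(x) t.test(x)$p.value)    # one p-value per grid point
+ ## mask <- pval < 0.05                                            # TRUE where significantly non-zero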
par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, 
intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3) + par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE) + mtext("m/s",cex=3) + + dev.off() + + # format it manually from the bash shell (run it from your workstation until the 'identity' command of ImageMagick becomes available on the cluster): + #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png + + + ### to plot the impact map of all regimes on a particular month: + #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100 + year.test <- 2016 + pos.year.test <- year.test - year.start + 1 + imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test] + #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032) + par(fig=c(0, 1, 0.05, 1), new=TRUE) + PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + # vector with the frequency of the WRs in the chosen month and year: + wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test]) + + ## # or save them as individual maps: + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + } + + # Visualize the psl pattern of each cluster separately for a selected forecasted month: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + 
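+ ## # Illustrative sketch (not part of the original analysis): the withinss1..withinss4 values shown
+ ## # in the titles below are within-cluster sums of squares of the k-means classification, the
+ ## # quantity kmeans() returns per cluster; illustrative call with synthetic data:
+ ## km <- kmeans(matrix(rnorm(200), ncol = 2), centers = 4)
+ ## km$withinss             # one within-cluster sum of squares per cluster
+ ## km$withinss / km$size   # normalised by cluster size, as in the commented 'withinss' lines above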
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 
100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 
'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple' || composition == 'edpr' ) && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + + } # close if on composition + + + if(composition == "psl.rean" || composition == "psl.rean.unordered" || composition == "corr.matrix") dev.off() + + print("Finished!") +} # close if on composition != "summary" + + + +#} # close for on forecasts.month + + + +if(composition == "taylor"){ + library("plotrix") + + fields.name <- "ERA-Interim" + + # choose a reference season from 13 (winter) to 16 (autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # override the previous values because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e. with running clusters) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + rean2.name <- "ERA-Interim" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # override the previous values because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regime: + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat,
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1),
pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" # "black" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod <- TRUE + #add.mod <- ifelse(p == 1, FALSE, TRUE) + + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) {
corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # 
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
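+ # transSim1..transSim4 and transObs1..transObs4 are assumed to be length-4 vectors with the
+ # transition probabilities (as fractions) from one regime to each of the four regimes, in the
+ # order NAO+, NAO-, Blocking, Atl.Ridge; hence the factor 100 to express them in %. The third
+ # array index is the plotting position of each regime (1 = left, 2 = bottom, 3 = right, 4 = top).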
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+ + array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking + array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge + + array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+ + array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO- + array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge + + array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+ + array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO- + array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking + array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Spatial Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400) + + # create an array similar to array.spat.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7] + array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6] + array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5] + array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4] + array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3] + array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2] + array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
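+ # (in these target-month panels, i2 maps a target month i and lead time 6-j back to the
+ # corresponding startdate index, wrapping to the previous calendar year when i-(6-j)
+ # is not positive)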
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
+ polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim regime frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1,
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
+ imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2])
+     polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3])
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
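+ # Note on the regime dimension: the 4th index of these summary arrays is assumed
+ # (from which slice each titled panel draws) to follow the order 1 = NAO+,
+ # 2 = Atlantic Ridge, 3 = NAO- and 4 = Blocking. A minimal sketch of that mapping;
+ # the name regime.slice is illustrative only and is not used elsewhere in this script:
+ regime.slice <- c("NAO+" = 1, "Atlantic Ridge" = 2, "NAO-" = 3, "Blocking" = 4)
+ # e.g. array.imp.colors[p, 1+l, regime.slice[["NAO-"]]] is the cell the NAO- panel above draws
+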
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
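+ # Layout note: every multi-panel figure here is built with par(fig=c(x1,x2,y1,y2),
+ # new=TRUE), which places the next plot in the given normalised device rectangle
+ # without advancing to a new page. A minimal standalone sketch of the idiom
+ # (the panel coordinates below are illustrative, not the ones used in this figure):
+ #   par(fig=c(0, 0.5, 0, 1));           plot(1:10)   # left half of the device
+ #   par(fig=c(0.5, 1, 0, 1), new=TRUE); plot(10:1)   # right half, same page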
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Simulated Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l,
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
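+ # The target-month panels read colours at startdate coordinates: for target month i
+ # plotted at x position j (x axis labelled 6:0, so lead time is 6-j), the startdate
+ # month is i-(6-j), wrapped back into 1..12. A quick standalone sanity check of that
+ # wrap-around arithmetic (assumption: it should match the usual modulo form):
+ for(i in 1:12) for(j in 0:6) {
+   i2 <- ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12)
+   stopifnot(i2 == (i - (6-j) - 1) %% 12 + 1)  # same mapping written with %%
+ }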
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
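+ # the three panels of each *_target_month figure leave out the persistence transition (here
+ # NAO+ -> NAO+, i.e. slice 1 of the colour array); only transitions towards the other three regimes are drawn.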
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
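+ # reminder of the fourth dimension of the colour arrays, following the panel titles used throughout:
+ # slice 1 = transition to NAO+, slice 2 = to Atlantic ridge, slice 3 = to NAO-, slice 4 = to blocking.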
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
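+ # axis(1, ..., labels=6:0) draws the lead-time axis reversed: the cell at x-position j shows
+ # lead time 6-j, which is why the colour arrays are read with index 1+6-j in the loops below.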
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ +
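+ # each (startdate p, lead l) cell is split into four triangles, one per destination regime:
+ # left = to NAO+ (slice 1), bottom = to Atlantic ridge (slice 2), right = to NAO- (slice 3),
+ # top = to blocking (slice 4), in the same order as the panels of the *_startdate figures.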
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
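+ # possible refactor (a minimal sketch, not called anywhere below): the eight threshold assignments
+ # repeated above for every array could be collapsed into one helper; "prob2col" is a name invented
+ # here, not part of the original script. The breaks must be applied from the top down so that each
+ # lower bin overwrites the higher ones, exactly as the explicit assignments do.
+ prob2col <- function(x, brks, cols){            # x: numeric array; brks: length(cols)+1 bin edges, e.g. my.seq
+   out <- array(cols[length(cols)], dim=dim(x))  # start from the top colour everywhere
+   for(k in (length(brks)-1):2) out[x < brks[k]] <- cols[k-1]
+   out[is.na(x)] <- "white"                      # missing transitions are drawn in white
+   out
+ }
+ # usage would be, e.g.: array.trans.obs4.colors <- prob2col(array.trans.obs4, my.seq, trans.cols)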
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", 
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2])
+ ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3])
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4])
+ ## }
+ ## }
+
+ ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+ ## dev.off()
+
+} # close if on composition == "summary"
+
+
+
+
+# impact map of the strongest WR:
+if(composition == "impact.highest" && fields.name == rean.name){
+
+ var.num <- 2 # choose a variable (1:sfcWind, 2:tas)
+ index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively
+
+ if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650)
+ if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650)
+ if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650)
+
+ plot.new()
+ #par(mfrow=c(4,3))
+ #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+ col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+ # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ lon.max = 45
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+ cluster2 <- which(orden == cluster2.name.period[p])
+ cluster3 <- which(orden == cluster3.name.period[p])
+ cluster4 <- which(orden == cluster4.name.period[p])
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ # most influential WT:
+ if (index == 1) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+ if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+ if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+ if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
+ # most influential WT with positive impact:
+ if (index == 2) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) >= max((imp1[i,j]), 
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+
+ }
+
+ # most influential WT with negative impact:
+ if (index == 3) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
+ if(p<=3) yMod <- 0.7
+ if(p>=4 && p<=6) yMod <- 0.5
+ if(p>=7 && p<=9) yMod <- 0.3
+ if(p>=10) yMod <- 0.1
+ if(p==13 || p==14) yMod <- 0.52
+ if(p==15 || p==16) yMod <- 0.1
+
+ if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+ if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+ if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+ if(p==13 || p==15) xMod <- 0.05
+ if(p==14 || p==16) xMod <- 0.50
+
+ # impact map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+ if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+ PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+ # title map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+ if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+ mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact.highest"
+
+
+
+if(monthly_anomalies) {
+ # Compute the climatology and anomalies (with a LOESS filter) for the sfcWind data, and draw the monthly anomalies, in case you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis:
+
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end
+
+ sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+ sfcWind <- sfcWind366[,,,1:365,,]
+
+ for(y in year.start:year.end){
+ y2 <- y - year.start + 1
+ if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March to December period, removing the 29th of February
+ }
+
+ rm(sfcWind366)
+ gc()
+
+ sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+ sfcWindClimLoess <- sfcWindClimDaily
+ for(i in n.pos.lat){
+ for(j in n.pos.lon){
+ my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ sfcWindClimLoess[,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(sfcWindClimDaily)
+ gc()
+
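+ # The loop above fits a LOESS curve (span=0.35) through the 365-day raw climatology of
+ # each grid point, keeping the seasonal cycle while removing day-to-day sampling noise.
+ # A minimal standalone sketch of the same idea at a single grid point, using a synthetic
+ # noisy seasonal cycle (hypothetical values, kept commented out so it never runs here):
+ ## day <- 1:365
+ ## raw.clim <- 10 + 3*cos(2*pi*(day-15)/365) + rnorm(365, sd=0.8) # assumed daily climatology
+ ## fit <- loess(raw.clim ~ day, data.frame(day, raw.clim), span=0.35) # same span as above
+ ## smooth.clim <- predict(fit) # smoothed annual cycle at the observed days
+ ## plot(day, raw.clim, type="l", col="grey"); lines(day, smooth.clim, col="red", lwd=2)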
sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), 
col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)])
+
+ ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1)
+
+ par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE)
+ mtext("m/s", cex=1.8)
+
+ par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE)
+
+ PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1)
+
+ # you could almost use the normal PlotEquiMap function below; it would only need to draw the black line at anomaly=0 and decrease the size of the contour labels:
+ # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5)
+
+ ##my.subset2 <- match(values.to.plot2, my.brks.var)
+ #legend2.cex <- 1.8
+
+ par(fig=c(0.37, 1, 0, 0.09), new=TRUE)
+ #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+ ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2)
+
+ par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE)
+ mtext("hPa", cex=1.8)
+
+ #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
+ #mtext("0", cex=1.8)
+
+ dev.off()
+
+ # same image formatted for the catalogue:
+ fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png")
+
+ system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2))
+
+
+ # mean wind speed and slp anomaly only during the days of the chosen year:
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence
+
+ # arrange the regime sequence in an array, to separate months from years:
+ cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years))
+ #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP
+
+ cluster.test <- cluster2D[,pos.year.test]
+
+ fre1.days <- length(which(cluster.test == 1))
+ fre2.days <- length(which(cluster.test == 2))
+ fre3.days <- length(which(cluster.test == 3))
+ fre4.days <- length(which(cluster.test == 4))
+
+ # wind anomaly for the chosen year and month and cluster:
+ sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+ sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+ sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+ sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+ # in case a regime has NO days in that month, we must set it to NA:
+ if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+
+ sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE)
+ sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE)
+ sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE)
+ sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE)
+
+ # load regime names for the chosen month:
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData")) # use month.test, not the stale 'p' left over from the sections above
+
+ ## add a strip with the daily sequence of WRs:
+
+ mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name))
+ mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name))
+ mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name))
+ mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name))
+
+ cluster1.name.short <- substr(cluster1.name,1,1)
+ cluster2.name.short <- substr(cluster2.name,1,1)
+ cluster3.name.short <- substr(cluster3.name,1,1)
+ cluster4.name.short <- substr(cluster4.name,1,1)
+
+ ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime:
+ if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1)
+ if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2)
+ if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3)
+ if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4)
+
+ c1 <- which(cluster.test == 1)
+ c2 <- which(cluster.test == 2)
+ c3 <- which(cluster.test == 3)
+ c4 <- which(cluster.test == 4)
+
+ cluster.test.letters <- cluster.test
+ cluster.test.letters[c1] <- cluster1.name.short
+ cluster.test.letters[c2] <- cluster2.name.short
+ cluster.test.letters[c3] <- cluster3.name.short
+ cluster.test.letters[c4] <- cluster4.name.short
+
+ cluster.col <- cluster.test.letters
+ cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1"
+ cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1"
+ cluster.col[which(cluster.test.letters == "B")] <- "White"
+ cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1"
+
+ orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("fre.days",cluster1), fre1.days)
+ assign(paste0("fre.days",cluster2), fre2.days)
+ assign(paste0("fre.days",cluster3), fre3.days)
+ assign(paste0("fre.days",cluster4), fre4.days)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ assign(paste0("imp.test",cluster1), sfcWindMean1)
+ assign(paste0("imp.test",cluster2), sfcWindMean2)
+ assign(paste0("imp.test",cluster3), sfcWindMean3)
+ assign(paste0("imp.test",cluster4), sfcWindMean4)
+
+ # psl anomaly for the chosen year and month and cluster:
+ slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+ slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+ slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+ slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+ # in case a regime has NO days in that month, we must set it to NA:
+ if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+
+ slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE)
+ slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE)
+ slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE)
+ slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE)
+
+ assign(paste0("psl.test",cluster1), slpMean1)
+ assign(paste0("psl.test",cluster2), slpMean2)
+ assign(paste0("psl.test",cluster3), slpMean3)
+ assign(paste0("psl.test",cluster4), slpMean4)
+
+ # save strip with the daily regime series for the chosen month and year:
+ fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png")
+ png(filename=fileoutput.seq,width=1500,height=1850)
+
+ plot.new()
+
+ sep <- 0.03
+ for(day in 1:n.days.in.a.period(month.test, 2001)){
+ sep.cum <- (day-1)*sep
+ polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day])
+ text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5)
+ text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=cluster.test.letters[day], cex=2)
+
+ }
+
+ dev.off()
+
+
+ # save average impact and sea level pressure only for the chosen month and year:
+ fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png")
+
+ png(filename=fileoutput.test,width=1500,height=2000)
+
+ plot.new()
+
+ par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE)
+ PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+ par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE)
+ PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+ par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE)
+ PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+ par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE)
+ PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+
+ par(fig=c(0,0.33,0.955,0.975), new=TRUE)
+ mtext(paste0(orden[1], " wind speed anomaly "), font=2, cex=2)
+ par(fig=c(0,0.33,0.725,0.745), new=TRUE)
+ mtext(paste0(orden[2], " wind speed anomaly "), font=2, cex=2)
+ par(fig=c(0,0.33,0.495,0.515), new=TRUE)
+ mtext(paste0(orden[3], " wind speed anomaly "), font=2, cex=2)
+ par(fig=c(0,0.33,0.265,0.285), new=TRUE)
+ mtext(paste0(orden[4], " wind speed anomaly "), font=2, cex=2)
+
+ par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE)
+ #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+ ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, 
bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part 
of the strip
+ system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both))
+
+
+ ## same image formatted for the catalogue:
+ fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+ system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 70 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test))
+
+ system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test))
+
+
+ } # close for on month.test
+ } # close for on year.test
+
+
+} # close if on monthly_anomalies
+
+
+
diff --git a/old/weather_regimes_maps_v25.R~ b/old/weather_regimes_maps_v25.R~
new file mode 100644
index 0000000000000000000000000000000000000000..d5ace3f94cbc5ef5831026b79ac47952049580a3
--- /dev/null
+++ b/old/weather_regimes_maps_v25.R~
@@ -0,0 +1,5782 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+#
+# I/O: input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: you need to have previously created the '_psl.RData' files, which are the output of 'weather_regimes.R', for each period you want to visualize.
+# If your regimes derive from a reanalysis, this script must be run twice:
+# first, only one month/season at a time with: 'period=X', (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "simple"
+# and inserting the regime names 'clusterX.name = ...' in the correct order; in this first run, you only save the ordered cartography.
+# You already have to know which is the right regime order by taking a look at the output maps (_clusterX.png) of weather_regimes.R
+# After that, any time you run this script, remember to set:
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean"
+#
+# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# Then, you have to run this script again to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). 
You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis
+# to the forecast system, employing it to automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+#library("corrplot")
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+
+rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'):
+fields.name <- rean.name #forecast.name
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
+save.names <- T # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months
+
+composition <- "none" # choose which kind of composition you want to plot:
+ # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+ # 'psl' for all the regime anomalies for a fixed forecast month
+ # 'fre' for all the interannual frequencies for a fixed forecast month
+ # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+ # 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) 
[You need all ClusterNames files]
+ # 'impact.highest' for all the impact plots of the regime with the highest impact
+ # 'single.impact' to save the four impact maps in a 2x2 composition
+ # 'single.psl' to save the individual psl map
+ # 'single.fre' to save the individual fre map
+ # 'none' doesn't plot anything, it only saves the ClusterName.Rdata files
+ # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+ # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps
+ # 'psl.rean' for all the regime anomalies for all months of a reanalysis
+ # 'psl.rean.unordered': as before, but without ordering clusters
+ # 'corr.matrix' : as before, but plots the correlation matrix only
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot
+
+# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12
+
+# Associate the regimes to the four clusters:
+cluster4.name <- "NAO+"
+cluster1.name <- "NAO-"
+cluster2.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+####### if monthly_anomalies <- TRUE, you have to specify these additional parameters:
+monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies for the chosen years and months set below over Europe
+year.chosen <- 2016
+month.chosen <- 1:12
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.data <- JRA55 # choose one of the above reanalyses
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+ # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+ # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+ # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! 
+#for(forecast.month in 1:12){ + +####### Derived variables ############################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... + + if(composition == "psl.rean") { + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering=FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "corr.matrix") { + ordering=FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + plot.new() + } + + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! 
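+
+  # The forecast branch below associates each simulated cluster with the observed regime sharing the highest
+  # spatial correlation, greedily masking the already-matched rows and columns of the correlation matrix with
+  # -999. A minimal standalone sketch of that matching ('greedy.match' is only illustrative and is not called
+  # anywhere in this script):
+  greedy.match <- function(corr.mat){
+    matched <- matrix(NA, nrow(corr.mat), 2)
+    for(k in 1:nrow(corr.mat)){
+      best <- which(corr.mat == max(corr.mat), arr.ind=TRUE)[1,]  # best remaining (sim, obs) pair
+      matched[k,] <- best
+      corr.mat[best[1],] <- -999  # mask this simulated cluster
+      corr.mat[,best[2]] <- -999  # mask this observed regime
+    }
+    matched[order(matched[,1]), 2]  # for each simulated cluster, the index of its observed regime
+  }
+  # e.g. greedy.match(matrix(runif(16), 4, 4)) returns a permutation of 1:4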
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple" || composition == 'edpr'){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+    impact.data <- FALSE
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig                  # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig              # restore the original values of this script
+
+        # load impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for reanalysis")
+          load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+        } else {
+          impact.data <- FALSE
+          print("Impact data for reanalysis not available")
+        }
+
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData"))  # load cluster time series and mean psl data
+
+        if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for forecasts")
+          if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))  # load mean var data for impact maps
+        } else {
+          impact.data <- FALSE
+          print("Impact data for forecasts not available")
+        }
+
+        if(var.num != var.num.orig) var.num <- var.num.orig                  # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig                    # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig    # restore the original values of this script
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig                # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+        # compute the simulated interannual monthly variability:
+        n.days.month <- dim(my.cluster.array[[p]])[1]  # number of days in the forecasted month
+
+        freq.sim1 <-
apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ...
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+        n.dd <- length(my.cluster.vector)
+
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        pers1 <- mean(sequ1[which(sequ1 != 0)])
+        pers2 <- mean(sequ2[which(sequ2 != 0)])
+        pers3 <- mean(sequ3[which(sequ3 != 0)])
+        pers4 <- mean(sequ4[which(sequ4 != 0)])
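+
+        ## A compact cross-check of the mean persistence values above, using run-length encoding; a sketch only,
+        ## kept commented so it does not alter the run ('mean.persistence' is hypothetical). Note that the
+        ## initialisation block and the d=1 loop step above both count day 1 when the series starts in a given
+        ## regime, so tiny differences with this rle-based version are possible:
+        ## mean.persistence <- function(v, cl) { r <- rle(v); mean(r$lengths[r$values == cl]) }
+        ## mean.persistence(my.cluster.vector, 1)  # compare with pers1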
+
+        # compute correlations between the simulated clusters and the observed regime anomalies:
+        cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+
+        if(composition == 'impact' && impact.data == TRUE) {
+          cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean
+          #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4
+        }
+
+        lat.forecast <- lat; lon.forecast <- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month }  # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData"))  # load reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData"))  # load also the reanalysis regime names
+
+        # load reanalysis varwrXmean impact data:
+        if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+        if(var.num != var.num.orig) var.num <- var.num.orig                  # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig              # restore the original values of this script
+        if(p.orig != p) p <- p.orig
+
+        # compute the observed transition probabilities:
+        n.days.month <- n.days.in.a.period(load.month,2001)
+        j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+        for (d in 1:(length(my.cluster$cluster)-1)){
+          if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+            j1o <- j1o + 1
+          }
+          if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+            j2o <- j2o + 1
+          }
+          if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+            j3o <- j3o + 1
+          }
+          if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+            j4o <- j4o + 1
+          }
+
+        }
+
+        trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+        trans.obs[1,2] <- length(which(transition.obs1 == 2))
+        trans.obs[1,3] <- length(which(transition.obs1 == 3))
+        trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+        trans.obs[2,1] <- length(which(transition.obs2 == 1))
+        trans.obs[2,3] <- length(which(transition.obs2 == 3))
+        trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+        trans.obs[3,1] <- length(which(transition.obs3 == 1))
+        trans.obs[3,2] <- length(which(transition.obs3 == 2))
+        trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+        trans.obs[4,1] <- length(which(transition.obs4 == 1))
+        trans.obs[4,2] <- length(which(transition.obs4 == 2))
+        trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+        trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+        for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        for(y in year.start:year.end){
+          # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+          freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+        }
+
+        sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+        sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+        sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+        sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y  # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+        cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+        cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+        cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+        cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 
<- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        persObs1 <- mean(sequ1[which(sequ1 != 0)])
+        persObs2 <- mean(sequ2[which(sequ2 != 0)])
+        persObs3 <- mean(sequ3[which(sequ3 != 0)])
+        persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+        ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+        ### forecast month: September
+        #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+        #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+        if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+        if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+        if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+        if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+        if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"}  # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+        # regimes.sim # for the debug
+
+        ### forecast month: October
+        #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+        #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+        if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+        if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+
+        ### forecast month: November
+        #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+        regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)  # simulated regimes after the manual correction
+        #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)  # already defined above, included only as reference
+
+        order.sim <- match(regimes.sim2, orden)
+        order.obs <- match(regimes.obs, orden)
+
+        clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+        clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+        # recompute the spatial correlations, since they might have changed after the manual corrections above:
+        spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+        spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+        spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+        spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+        if(composition == 'impact' && impact.data == TRUE) {
+          clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp)
+          #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv)
+
+          clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean)
+          #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4)
+
+          cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]]))
+          cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]]))
+          cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]]))
+          cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]]))
+
+        }
+
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData"))  # reload the forecast cluster time series and mean psl data
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))  # reload the mean var data for the impact maps
+
+        if(var.num != var.num.orig) var.num <- var.num.orig                  # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(!identical(period.orig, period)) period <- period.orig
+        if(p.orig != p) p <- p.orig
+        if(identical(rev(lat),lat.forecast)) lat <- rev(lat)                 # restore the correct lat order
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig              # restore the original values of this script
+
+      } # close if on 'forecast.name'
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+        if(save.names){
+          save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+        } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+          load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+          if(var.num != var.num.orig) var.num <- var.num.orig                  # restore the original values of this script
+          if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        }
+      }
+
+      # assign to each cluster its regime:
+      #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+      if(ordering == FALSE){
+        cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+      } else {
+        cluster1 <- which(orden == cluster1.name)
+        cluster2 <- which(orden == cluster2.name)
+        cluster3 <- which(orden == cluster3.name)
+        cluster4 <- which(orden == cluster4.name)
+      }
+
+      assign(paste0("map",cluster1), pslwr1mean)
+      assign(paste0("map",cluster2), pslwr2mean)
+      assign(paste0("map",cluster3), pslwr3mean)
+      assign(paste0("map",cluster4), pslwr4mean)
+
+      assign(paste0("fre",cluster1), wr1y)
+      assign(paste0("fre",cluster2), wr2y)
+      assign(paste0("fre",cluster3), wr3y)
+      assign(paste0("fre",cluster4), wr4y)
+
+      #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1])
+      #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2])
+      #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3])
+      #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4])
+
+      if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){
+        assign(paste0("imp",cluster1), varwr1mean)
+        assign(paste0("imp",cluster2), varwr2mean)
+        assign(paste0("imp",cluster3), varwr3mean)
+        assign(paste0("imp",cluster4), varwr4mean)
+
+        assign(paste0("sig",cluster1), pvalue1)
+        assign(paste0("sig",cluster2), pvalue2)
+        assign(paste0("sig",cluster3), pvalue3)
+        assign(paste0("sig",cluster4), pvalue4)
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        assign(paste0("freMax",cluster1), wr1yMax)
+        assign(paste0("freMax",cluster2), wr2yMax)
+        assign(paste0("freMax",cluster3), wr3yMax)
+        assign(paste0("freMax",cluster4), wr4yMax)
+
+        assign(paste0("freMin",cluster1), wr1yMin)
+        assign(paste0("freMin",cluster2), wr2yMin)
+        assign(paste0("freMin",cluster3), wr3yMin)
+        assign(paste0("freMin",cluster4), wr4yMin)
+
+        #assign(paste0("spatial.cor",cluster1), spat.cor1)
+        #assign(paste0("spatial.cor",cluster2), spat.cor2)
+        #assign(paste0("spatial.cor",cluster3), spat.cor3)
+        #assign(paste0("spatial.cor",cluster4), spat.cor4)
+
+        assign(paste0("persist",cluster1), pers1)
+        assign(paste0("persist",cluster2), pers2)
+        assign(paste0("persist",cluster3), pers3)
+        assign(paste0("persist",cluster4), pers4)
+
+        clusters.all <- c(cluster1, cluster2, cluster3, cluster4)
+        ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4))
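+
+        ## The trans.sim and trans.obs matrices stored just below were built with explicit loops further above;
+        ## the same row-normalised transition probabilities can be cross-checked compactly with table(). A sketch
+        ## only, kept commented so it does not alter the run ('transition.matrix' is hypothetical). Applied to
+        ## my.cluster$cluster it matches trans.obs (with 0 instead of NA on the diagonal); for the member array,
+        ## the loops above additionally avoid counting transitions across year/member boundaries:
+        ## transition.matrix <- function(v){
+        ##   pairs <- cbind(head(v,-1), tail(v,-1))                # consecutive (day, day+1) pairs
+        ##   pairs <- pairs[pairs[,1] != pairs[,2], , drop=FALSE]  # keep only regime changes
+        ##   tab <- table(factor(pairs[,1], levels=1:4), factor(pairs[,2], levels=1:4))
+        ##   prop.table(tab, margin=1)                             # row-normalised probabilities
+        ## }
+        ## transition.matrix(my.cluster$cluster)  # compare with trans.obs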
+        assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence]))
+        assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence]))
+        assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence]))
+        assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence]))
+
+        cluster1.obs <- which(orden == regimes.obs[1])
+        cluster2.obs <- which(orden == regimes.obs[2])
+        cluster3.obs <- which(orden == regimes.obs[3])
+        cluster4.obs <- which(orden == regimes.obs[4])
+
+        clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs)
+        ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4))
+
+        assign(paste0("freObs",cluster1.obs), wr1y.obs)
+        assign(paste0("freObs",cluster2.obs), wr2y.obs)
+        assign(paste0("freObs",cluster3.obs), wr3y.obs)
+        assign(paste0("freObs",cluster4.obs), wr4y.obs)
+
+        assign(paste0("persistObs",cluster1.obs), persObs1)
+        assign(paste0("persistObs",cluster2.obs), persObs2)
+        assign(paste0("persistObs",cluster3.obs), persObs3)
+        assign(paste0("persistObs",cluster4.obs), persObs4)
+
+        assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs]))
+        assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs]))
+        assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs]))
+        assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs]))
+
+      } # close if on 'forecast.name' && n.map < 7
+
+      # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+      #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon))  # restrict the area to continental Europe only
+      EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon)))  # restrict the area to continental Europe only
+
+      # breaks and colors of the geopotential fields:
+      if(psl == "g500"){
+        #my.brks <- c(-300,-199:200,300)  # % Mean anomaly of a WR
+        my.brks <- c(-300,seq(-200,200,10),300)   # % Mean anomaly of a WR
+        my.brks2 <- c(-300,seq(-200,200,10),300)  # % Mean anomaly of a WR for the contour lines
+        #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+        values.to.plot <- c(-200, -100, 0, 100, 200)  # values we want to show in the labels
+      }
+
+      if(psl == "psl"){
+        my.brks <- c(seq(-21,-1,2),0,seq(1,21,2))    # % Mean anomaly of a WR
+        # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2))  # % Mean anomaly of a WR
+
+        my.brks2 <- my.brks        #c(seq(-20,20,2))  # % Mean anomaly of a WR for the contour lines
+        values.to.plot <- my.brks  #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+      }
+
+      my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1)  # blue--white--red colors
+      my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+      # breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+      #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20)  # % Mean anomaly of a WR
+      #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20)         # % Mean anomaly of a WR
+      #my.brks.var <- c(-20,seq(-3,3,0.5),20)                                # % Mean anomaly of a WR
+      if(var.name[var.num] == "sfcWind") {
+        my.brks.var <- seq(-3,3,0.5)
+        values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+      }
+      if(var.name[var.num] == "tas") {
+        my.brks.var <- seq(-5,5,1)
+        values.to.plot2 <- my.brks.var  #c(-5,-3,-1,0,1,3,5)
+      }
+
+      #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1)  # blue--yellow-red colors
+
my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, 
contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+        par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+        PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+        par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+        PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+
+        ## # for the debug:
+        ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+        ## mes <- p
+        ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+        ## #plot(1981:2015,fre.obs,type="l")
+        ## #lines(1981:2015,fre1,type="l",col="red")
+        ## #lines(1981:2015,fre2,type="l",col="blue")
+        ## #lines(1981:2015,fre4,type="l",col="green")
+        ## cor1 <- cor(fre.obs, fre1)
+        ## cor2 <- cor(fre.obs, fre2)
+        ## cor3 <- cor(fre.obs, fre3)
+        ## cor4 <- cor(fre.obs, fre4)
+        ## round(c(cor1,cor2,cor3,cor4),2)
+
+        # Legend centroid maps:
+        legend1.xpos <- 0.10
+        legend1.width <- 0.30
+        legend1.cex <- 2
+        my.subset <- match(values.to.plot, my.brks)
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE)  # syntax: fig=c(xmin, xmax, ymin, ymax)
+        ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+        ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+        ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+        ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+        # Subtitle centroid maps:
+        map.title.xpos <- 0.76
+        map.title.width <- 0.2
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+        mtext("year", cex=3)
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+        mtext("year", cex=3)
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+        mtext("year", cex=3)
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+        mtext("year", cex=3)
+
+        # Title Centroid Maps:
+        title1.xpos <- 0.02
+        title1.width <- 0.4
+        par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+        mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+        par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+        mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+        par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+        mtext(paste0(regime3.name,": ",
psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name 
== rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+        par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE)
+        mtext(paste0(regime2.name, " Frequency"), font=2, cex=4)
+        par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE)
+        mtext(paste0(regime3.name, " Frequency"), font=2, cex=4)
+        par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE)
+        mtext(paste0(regime4.name, " Frequency"), font=2, cex=4)
+
+        # % on Frequency plots:
+        symbol.xpos <- 0.735
+        symbol.width <- 0.01
+        par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE)
+        mtext("%", cex=3.3)
+        par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE)
+        mtext("%", cex=3.3)
+        par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE)
+        mtext("%", cex=3.3)
+        par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE)
+        mtext("%", cex=3.3)
+
+        # mean frequency on Frequency plots:
+        if(mean.freq == TRUE){
+          symbol.xpos <- 0.9
+          symbol.width <- 0.1
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE)
+          mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5)
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE)
+          mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5)
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE)
+          mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5)
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE)
+          mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5)
+        }
+
+        # add the correlation between the observed time series and the ensemble-mean time series on the Frequency plots:
+        if(fields.name == forecast.name){
+          symbol.xpos <- 0.76
+          symbol.width <- 0.1
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE)
+          sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2)
+          mtext(paste0("r= ",sp.cor1), cex=4.5)
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE)
+          sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2)
+          mtext(paste0("r= ",sp.cor2), cex=4.5)
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE)
+          sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+          mtext(paste0("r= ",sp.cor3), cex=4.5)
+          par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE)
+          sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+          mtext(paste0("r= ",sp.cor4), cex=4.5)
+        }
+
+        if(!as.pdf) dev.off()  # when saving the output as .png files
+
+      } # close if on: composition == 'simple'
+
+
+      if(composition == "edpr"){
+
+        # adjust the color legends to include triangles at the extremities, increasing the number of intervals by two:
+        my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)+1)          # blue--white--red colors
+        my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)+1)  # blue--white--red colors
+
+        fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png")
+
+        png(filename=fileoutput,width=3000,height=3700)
+
+        plot.new()
+
+        ## Centroid maps:
+        map.xpos <- 0.27
+        map.width <- 0.46
+        par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+        PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+        par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+        PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+        par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+        PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+        par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+        PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+
+        ## # for the debug:
+        ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+        ## mes <- p
+        ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+        ## #plot(1981:2015,fre.obs,type="l")
+        ## #lines(1981:2015,fre1,type="l",col="red")
+        ## #lines(1981:2015,fre2,type="l",col="blue")
+        ## #lines(1981:2015,fre4,type="l",col="green")
+        ## cor1 <- cor(fre.obs, fre1)
+        ## cor2 <- cor(fre.obs, fre2)
+        ## cor3 <- cor(fre.obs, fre3)
+        ## cor4 <- cor(fre.obs, fre4)
+        ## round(c(cor1,cor2,cor3,cor4),2)
+
+        # Legend centroid maps:
+        legend1.xpos <- 0.30
+        legend1.width <- 0.40
+        legend1.cex <- 2
+        my.subset <- match(values.to.plot, my.brks)
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE)  # syntax: fig=c(xmin, xmax, ymin, ymax)
+        ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+        ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+        ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+        par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+        ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+        if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+        # Subtitle centroid maps:
+        map.title.xpos <- 0.76
+        map.title.width <- 0.2
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+        mtext("year", cex=3)
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+        mtext("year", cex=3)
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+        mtext("year", cex=3)
+        par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+        mtext("year", cex=3)
+
+        # Title Centroid Maps:
+        title1.xpos <- 0.3
+        title1.width <- 0.44
+        par(fig=c(title1.xpos,
title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, 
cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
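+ # (note on the plotmath workaround used just below: paste0() is not evaluated
+ # inside expression(), but bquote() substitutes the value of .(expr) into the
+ # call before drawing; a minimal sketch, assuming 'fre1.NA' is in memory:
+ # freq.lab <- paste0(round(mean(100*fre1.NA),1),"%")
+ # mtext(bquote(bar(nu) == .(freq.lab)))  # renders e.g. as: nu-bar = 12.3%
+ # )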
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + #par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + #if(n.map < 8) { + # mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + #} else{ + # mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + #} + + } # close if on composition == 'psl.rean' + + + if(composition == "psl.rean.unordered"){ + + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + 
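+ # (layout note for this column of maps: each par() call places the next panel in
+ # normalised device coordinates, fig=c(xmin, xmax, ymin, ymax); counting panels
+ # k=0..3 from the bottom, each spans ymin = map.ypos + k*(map.height + map.ysep)
+ # and ymax = ymin + map.height, so the four regime maps stack without overlapping)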
PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + + + } # close if on psl.rean.unordered + + + if(composition == "corr.matrix"){ + + # correlation matrix with the DJF regimes: + cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean + + rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" + + load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # load the mean DJF SLP data from the same reanalysis + load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also the reanalysis DJF regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("psl.ordered",cluster1), pslwr1mean) + assign(paste0("psl.ordered",cluster2), pslwr2mean) + assign(paste0("psl.ordered",cluster3), pslwr3mean) + assign(paste0("psl.ordered",cluster4), pslwr4mean) + + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden)) + cluster.corr[1,1] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered1)) + cluster.corr[1,2] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered2)) + cluster.corr[1,3] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered3)) + cluster.corr[1,4] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered4)) + cluster.corr[2,1] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered1)) + cluster.corr[2,2] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered2)) + cluster.corr[2,3] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered3)) + cluster.corr[2,4] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered4)) + cluster.corr[3,1] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered1)) + cluster.corr[3,2] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered2)) + cluster.corr[3,3] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered3)) + cluster.corr[3,4] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered4)) + cluster.corr[4,1] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered1)) + cluster.corr[4,2] <- cor(as.vector(cluster4.monthly),
as.vector(psl.ordered2)) + cluster.corr[4,3] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered3)) + cluster.corr[4,4] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered4)) + + #cluster.corr2 <- t(cluster.corr) + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + print(paste0("map.xpos= ", map.xpos)) + + par(fig=c(0, 1, 0, 1), new=TRUE) + text.cex <- 2 + text.ypos <- 1.03 + text(x=map.xpos - 0.015, y=text.ypos - 0.02, labels="cl1", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.04, labels="cl2", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.06, labels="cl3", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.08, labels="cl4", cex=text.cex) + text(x=map.xpos + 0.000, y=text.ypos + 0.00, labels="NAO+", cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos + 0.00, labels="NAO-", cex=text.cex) + text(x=map.xpos + 0.030, y=text.ypos + 0.00, labels="BLO", cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos + 0.00, labels="ATL", cex=text.cex) + + text(x=map.xpos + 0.00, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,1], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,2], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,3], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,3], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,3], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,3], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,4], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,4], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,4], cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,4], cex=text.cex) + + } # close if on corr.matrix + + + # Visualize the composition with the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + +
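+ # (note on rescale(), used for every map in these compositions: it is assumed to
+ # clamp the field into [my.brks.var[1], tail(my.brks.var,1)] so that no value
+ # falls outside the colour breaks; a minimal equivalent sketch would be:
+ # rescale.sketch <- function(x, lo, hi) pmin(pmax(x, lo), hi)
+ # )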
par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, 
intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3) + par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE) + mtext("m/s",cex=3) + + dev.off() + + # format it manually from the bash shell (you must run it from your workstation until ImageMagick's 'identify' command is available on the cluster): + #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png + + + ### to plot the combined impact map of all regimes for a particular month: + #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100 + year.test <- 2016 + pos.year.test <- year.test - year.start + 1 + imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test] + #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032) + par(fig=c(0, 1, 0.05, 1), new=TRUE) + PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + # vector with the frequency of the WRs in the chosen month and year: + wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test]) + + ## # or save them as individual maps: + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + } + + # Save the psl anomaly pattern of each regime as a separate image for the selected month: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) +
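+ # (the four png/PlotEquiMap2/dev.off() blocks of this section are identical up to
+ # the cluster index; a compact alternative sketch, assuming map1..map4 and
+ # withinss1..withinss4 are in memory:
+ # for(k in 1:4){
+ #   png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster",k,".png"), width=2000, height=1400, res=300)
+ #   m <- get(paste0("map",k)); w <- get(paste0("withinss",k))
+ #   PlotEquiMap2(rescale(m,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(m), brks2=my.brks2, drawleg=F, toptitle=paste0(my.period[p]," WithinSS=", round(w/1000000,2)))
+ #   dev.off()
+ # })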
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 
100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 
'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple' || composition == 'edpr' ) && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime1.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime2.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime3.name), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime4.name), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + + dev.off() + + } # close if on composition + + + if(composition == "psl.rean" || composition == "psl.rean.unordered" || composition == "corr.matrix") dev.off() + + print("Finished!") +} # close if on composition != "summary" + + + +#} # close for on forecasts.month + + + +if(composition == "taylor"){ + library("plotrix") + + fields.name <- "ERA-Interim" + + # choose a reference season from 13 (Winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the Blocking and Atlantic Ridge regimes were saved swapped!
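+ # (a sketch of a less error-prone container than positional assign(), assuming the
+ # four stored centroids are in memory:
+ # map.ref <- setNames(list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean),
+ #                     c(cluster1.name, cluster2.name, cluster3.name, cluster4.name))
+ # map.ref[["NAO+"]] would then retrieve a centroid by regime name instead of by index)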
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (e.g., with running clusters) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + rean2.name <- "ERA-Interim" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the Blocking and Atlantic Ridge regimes were saved swapped!
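+ # (relabeling sketch: pslwr1mean..pslwr4mean are stored in the order produced by the
+ # clustering, while 'orden' defines the display order; each assign() below maps one
+ # stored centroid to its display slot, e.g. with cluster1.ref = 3 the call
+ # assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) creates map.old.ref3 from
+ # the first stored centroid)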
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of the monthly regimes: + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat,
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1),
pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" # "black" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod <- TRUE + #add.mod <- ifelse(p == 1, FALSE, TRUE) + + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) {
corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) #
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
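+ # (slot convention for the third index of all these summary arrays, as in the
+ # array.spat.cor assignments above: 1 = left triangle (NAO+), 2 = bottom triangle
+ # (Atl.Ridge), 3 = right triangle (NAO-), 4 = top triangle (Blocking); a sketch of
+ # the mapping: regime.slot <- c("NAO+"=1, "Atl.Ridge"=2, "NAO-"=3, "Blocking"=4))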
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+ + array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking + array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge + + array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+ + array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO- + array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge + + array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+ + array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO- + array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking + array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Spatial correlation summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of correlation values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7] + array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6] + array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5] + array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4] + array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3] + array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2] + array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
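+ 
+ # --- Illustrative sketch (an assumption, not part of the original script) ---
+ # The repeated threshold cascades that fill the array.*.colors objects in this
+ # script can be condensed with findInterval(), which returns 0 for values below
+ # the first break and k for values in [brks[k], brks[k+1]). A hedged equivalent
+ # of the frequency binning above, with made-up data:
+ if(FALSE){  # example only, never executed
+   vals <- array(runif(12*7*4, 0, 40), c(12,7,4))  # hypothetical frequencies (%)
+   brks <- c(20,22,24,26,28,30,32)                 # interior breaks, as in my.seq
+   cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+   col.array <- array(cols[findInterval(vals, brks) + 1], dim(vals))
+ }
+ 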
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Bias freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1,
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}} + + 
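# aside: each of the colour-binning cascades in this script (e.g. the one above for array.imp)
+ # could be collapsed into a single call with findInterval(); a minimal sketch, assuming
+ # length(cols) == length(brks) - 1 and values lying inside the break range:
+ val2col <- function(vals, brks, cols, na.col="white"){
+   out <- array(cols[findInterval(vals, brks, rightmost.closed=TRUE)], dim=dim(vals))
+   out[is.na(vals)] <- na.col   # NA cells painted white, as done by hand for the transition arrays
+   out
+ }
+ # e.g.: array.imp.colors <- val2col(array.imp, my.seq, imp.cols)
+ 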
par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, 
cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Simulated Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5) + #mtext("ECMWF-S4 / NAO+ Transition Probability \nJanuary to December / 1994-2013", cex=1.5)  # alternative title; do not draw both, they overprint + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
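# i2 below maps (target month i, lead time 6-j) back to the corresponding startdate month,
+ # wrapping across January: e.g. target month March at lead time 4 gives i2 = 3 - 4 + 12 = 11 (November)
+ 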
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
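# white cells are the NA entries of array.trans.obs1 (startdate/lead combinations with no
+ # observed NAO+ spells to transition from, presumably)
+ 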
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
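# note the regime order of the colour-array layers used by all these panels: it appears to be
+ # 1 = NAO+, 2 = Atlantic ridge, 3 = NAO-, 4 = blocking
+ 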
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
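+ # Illustrative aside (added note, not part of the original computation): in the
+ # target-month panels, 'i' is the target month and 'j' the column, so the lead time
+ # is 6-j and i2 = i - (6-j) recovers the startdate month, wrapping around the calendar
+ # when it falls before January. A hypothetical worked example:
+ #   i <- 3; j <- 1              # March target, lead time 6-1 = 5 months
+ #   i2 <- i - (6-j)             # -2, i.e. before January
+ #   if(i2 <= 0) i2 <- i2 + 12   # wraps to 10: the October startdate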
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from Atlantic ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
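+ # Illustrative aside (added note): the threshold cascades used throughout this section
+ # are equivalent to binning with cut(); a compact sketch under the same breaks, with
+ # hypothetical names (percentage values in [0,100], NAs then mapped to white):
+ #   bin <- cut(as.vector(array.trans.obs4), breaks = my.seq, labels = FALSE,
+ #              include.lowest = TRUE, right = FALSE)
+ #   cols.alt <- array(trans.cols[bin], dim = dim(array.trans.obs4))
+ #   cols.alt[is.na(cols.alt)] <- "white"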
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
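+ # Illustrative aside (added note): in the single-panel summaries above, each
+ # (startdate, lead-time) cell is split by polygon() into four triangles (left, bottom,
+ # right, top), one per target regime, around the cell centre (p, l). A standalone
+ # sketch of one such cell, with hypothetical colours:
+ #   plot(NA, xlim=c(0,2), ylim=c(-1,1)); p <- 1; l <- 0
+ #   polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col="grey80")   # left triangle
+ #   polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col="grey60")   # bottom triangle
+ #   polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col="grey40")   # right triangle
+ #   polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col="grey20")   # top triangle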
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2])
+ ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3])
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4])
+ ## }
+ ## }
+
+ ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+ ## dev.off()
+
+} # close if on composition == "summary"
+
+
+# impact map of the strongest WR:
+if(composition == "impact.highest" && fields.name == rean.name){
+
+ var.num <- 2 # choose a variable (1:sfcWind, 2:tas)
+ index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively
+
+ if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650)
+ if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650)
+ if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650)
+
+ plot.new()
+ #par(mfrow=c(4,3))
+ #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+ col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+ # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ lon.max = 45
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+ cluster2 <- which(orden == cluster2.name.period[p])
+ cluster3 <- which(orden == cluster3.name.period[p])
+ cluster4 <- which(orden == cluster4.name.period[p])
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ # most influential WT:
+ if (index == 1) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+ if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+ if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+ if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
+ # most influential WT with positive impact:
+ if (index == 2) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) >= max((imp1[i,j]),
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+
+ }
+
+ # most influential WT with negative impact:
+ if (index == 3) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
+ if(p<=3) yMod <- 0.7
+ if(p>=4 && p<=6) yMod <- 0.5
+ if(p>=7 && p<=9) yMod <- 0.3
+ if(p>=10) yMod <- 0.1
+ if(p==13 || p==14) yMod <- 0.52
+ if(p==15 || p==16) yMod <- 0.1
+
+ if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+ if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+ if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+ if(p==13 || p==15) xMod <- 0.05
+ if(p==14 || p==16) xMod <- 0.50
+
+ # impact map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+ if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+ PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+ # title map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+ if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+ mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact.highest"
+
+
+if(monthly_anomalies) {
+ # Compute the climatology and anomalies (with a LOESS filter) for the sfcWind data and draw the monthly anomalies, in case you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis:
+
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end
+
+ sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+ sfcWind <- sfcWind366[,,,1:365,,]
+
+ for(y in year.start:year.end){
+ y2 <- y - year.start + 1
+ if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period, removing the 29th of February
+ }
+
+ rm(sfcWind366)
+ gc()
+
+ sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+ sfcWindClimLoess <- sfcWindClimDaily
+ for(i in n.pos.lat){
+ for(j in n.pos.lon){
+ my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ sfcWindClimLoess[,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(sfcWindClimDaily)
+ gc()
+
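+ # Illustrative aside (added note): loess() with span=0.35 fits each calendar day using
+ # a local regression over roughly 0.35*365 ~ 128 neighbouring days, smoothing sampling
+ # noise out of the daily climatology while keeping the seasonal cycle. A standalone
+ # sketch with synthetic data and hypothetical names:
+ #   doy <- 1:365
+ #   clim.noisy <- 10 + 3*sin(2*pi*doy/365) + rnorm(365, sd=0.5)   # fake daily climatology
+ #   clim.smooth <- predict(loess(clim.noisy ~ doy, span=0.35))    # smoothed seasonal cycle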
sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), 
col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)])
+
+      ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1)
+
+      par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE)
+      mtext("m/s", cex=1.8)
+
+      par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE)
+
+      PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1)
+
+      # you could almost use the normal PlotEquiMap function below; it only lacks the black contour line at anomaly=0 and smaller contour labels:
+      # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5)
+
+      ##my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.37, 1, 0, 0.09), new=TRUE)
+      #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+      ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2)
+
+      par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE)
+      mtext("hPa", cex=1.8)
+
+      #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
+      #mtext("0", cex=1.8)
+
+      dev.off()
+
+      # same image formatted for the catalogue:
+      fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2))
+
+      # mean wind speed and slp anomaly only during days of the chosen year:
+      load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence
+
+      # arrange the regime sequence in an array, to separate months from years:
+      cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years))
+      #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP
+
+      cluster.test <- cluster2D[,pos.year.test]
+
+      fre1.days <- length(which(cluster.test == 1))
+      fre2.days <- length(which(cluster.test == 2))
+      fre3.days <- length(which(cluster.test == 3))
+      fre4.days <- length(which(cluster.test == 4))
+
+      # wind anomaly for the chosen year, month and cluster:
+      sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+      sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+      sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+      sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+      # in case a regime has NO days in that month, we must set it to NA:
+      if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+
+      sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE)
+
+      # load regime names (for the chosen month):
+      load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData"))
+
+      ## add strip with the daily sequence of WRs:
+
+      mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name))
+      mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name))
+      mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name))
+      mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name))
+
+      cluster1.name.short <- substr(cluster1.name,1,1)
+      cluster2.name.short <- substr(cluster2.name,1,1)
+      cluster3.name.short <- substr(cluster3.name,1,1)
+      cluster4.name.short <- substr(cluster4.name,1,1)
+
+      ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime:
+      if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1)
+      if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2)
+      if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3)
+      if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4)
+
+      c1 <- which(cluster.test == 1)
+      c2 <- which(cluster.test == 2)
+      c3 <- which(cluster.test == 3)
+      c4 <- which(cluster.test == 4)
+
+      cluster.test.letters <- cluster.test
+      cluster.test.letters[c1] <- cluster1.name.short
+      cluster.test.letters[c2] <- cluster2.name.short
+      cluster.test.letters[c3] <- cluster3.name.short
+      cluster.test.letters[c4] <- cluster4.name.short
+
+      cluster.col <- cluster.test.letters
+      cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1"
+      cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1"
+      cluster.col[which(cluster.test.letters == "B")] <- "White"
+      cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1"
+
+      orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+      cluster1 <- which(orden == cluster1.name)
+      cluster2 <- which(orden == cluster2.name)
+      cluster3 <- which(orden == cluster3.name)
+      cluster4 <- which(orden == cluster4.name)
+
+      assign(paste0("fre.days",cluster1), fre1.days)
+      assign(paste0("fre.days",cluster2), fre2.days)
+      assign(paste0("fre.days",cluster3), fre3.days)
+      assign(paste0("fre.days",cluster4), fre4.days)
+
+      assign(paste0("fre",cluster1), wr1y)
+      assign(paste0("fre",cluster2), wr2y)
+      assign(paste0("fre",cluster3), wr3y)
+      assign(paste0("fre",cluster4), wr4y)
+
+      assign(paste0("imp.test",cluster1), sfcWindMean1)
+      assign(paste0("imp.test",cluster2), sfcWindMean2)
+      assign(paste0("imp.test",cluster3), sfcWindMean3)
+      assign(paste0("imp.test",cluster4), sfcWindMean4)
+
+      # psl anomaly for the chosen year, month and cluster:
+      slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+      slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+      slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+      slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+      # in case a regime has NO days in that month, we must set it to NA:
+      if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+
+      slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE)
+      slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE)
+      slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE)
+      slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE)
+
+      assign(paste0("psl.test",cluster1), slpMean1)
+      assign(paste0("psl.test",cluster2), slpMean2)
+      assign(paste0("psl.test",cluster3), slpMean3)
+      assign(paste0("psl.test",cluster4), slpMean4)
+
+      # save the strip with the daily regime series for the chosen month and year:
+      fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png")
+      png(filename=fileoutput.seq,width=1500,height=1850)
+
+      plot.new()
+
+      sep <- 0.03
+      for(day in 1:n.days.in.a.period(month.test, 2001)){
+        sep.cum <- (day-1)*sep
+        polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day])
+        text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5)
+        text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=cluster.test.letters[day], cex=2)
+      }
+
+      dev.off()
+
+      # save the average impact and sea level pressure only for the chosen month and year:
+      fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png")
+
+      png(filename=fileoutput.test,width=1500,height=2000)
+
+      plot.new()
+
+      par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE)
+      PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE)
+      PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE)
+      PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE)
+      PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+
+      par(fig=c(0,0.33,0.955,0.975), new=TRUE)
+      mtext(paste0(orden[1], " wind speed anomaly "), font=2, cex=2)
+      par(fig=c(0,0.33,0.725,0.745), new=TRUE)
+      mtext(paste0(orden[2], " wind speed anomaly "), font=2, cex=2)
+      par(fig=c(0,0.33,0.495,0.515), new=TRUE)
+      mtext(paste0(orden[3], " wind speed anomaly "), font=2, cex=2)
+      par(fig=c(0,0.33,0.265,0.285), new=TRUE)
+      mtext(paste0(orden[4], " wind speed anomaly "), font=2, cex=2)
+
+      par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE)
+      #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+      ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8,
bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part 
of the strip
+      system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both))
+
+      ## same image formatted for the catalogue:
+      fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 70 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test))
+
+      system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test))
+
+    } # close for on month.test
+  } # close for on year.test
+
+} # close if on monthly_anomalies
+
+
+
diff --git a/old/weather_regimes_maps_v26.R b/old/weather_regimes_maps_v26.R
new file mode 100644
index 0000000000000000000000000000000000000000..87db8a89366b6d334609a1af774827eab4578181
--- /dev/null
+++ b/old/weather_regimes_maps_v26.R
@@ -0,0 +1,5809 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: you need to have previously created the '_psl.RData' files, which are the output of 'weather_regimes.R', for each period you want to visualize.
+# If your regimes derive from a reanalysis, this script must be run twice:
+# first, only one month/season at a time with: 'period=X' (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"',
+# inserting the regime names 'cluster1.name=...' in the correct order; in this first run, you only save the ordered cartography.
+# You already have to know the right regime order, by taking a look at the output maps (_clusterX.png) of weather_regimes.R
+# After that, any time you run this script, remember to set:
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean"
+#
+# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden').
You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you have to compute before the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, to employ it to automatically associate to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +#library("corrplot") + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance" + +rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis) + +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name #forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +composition <- "corr.matrix" # choose which kind of composition you want to plot: + # 'none' doesn't plot anything, it only associates the clusters to the regimes with the manual association in the rows below + # and saves them in the ClusterName.Rdata files for each period selected, overwriting the eventual pre-existing files. + # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # 'psl' for all the regime anomalies for a fixed forecast month + # 'fre' for all the interannual frequencies for a fixed forecast month + # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) 
[You need all ClusterNames files]
+                             # 'impact.highest' for all the impact plots of the regime with the highest impact
+                             # 'single.impact' to save the four impact maps in a 2x2 composition
+                             # 'single.psl' to save the individual psl map
+                             # 'single.fre' to save the individual fre map
+                             # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+                             # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps
+                             # 'psl.rean' for all the regime anomalies for all months of a reanalysis
+                             # 'psl.rean.unordered': as before, but without ordering the regimes in the order of vector 'orden'
+                             # 'corr.matrix': as before, but plot the correlation matrix only
+                             # 'variance': as 'none', but instead of associating clusters to regimes, it reorders the clusters in decreasing order of explained variance
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot
+
+# Choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12
+
+# Manually associate the four clusters to the four regimes, one period at a time (only in case composition="none"):
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster3.name <- "Blocking"
+cluster4.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+####### if monthly_anomalies <- TRUE, you have to specify these additional parameters:
+monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below
+year.chosen <- 2016
+month.chosen <- 1:12
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.data <- JRA55 # choose one of the above reanalyses
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+                      # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
+#for(forecast.month in 1:12){
+
+####### Derived variables ###############################################################################################################################################
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
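+# Illustrative aside (not part of the original script): throughout the script, 'orden'
+# fixes the plotting order, and each cluster is mapped to its row in the compositions
+# with which()/match(). A minimal sketch of that mechanism (names below are hypothetical):
+#   orden         <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+#   cluster.names <- c("Blocking","NAO+","Atl.Ridge","NAO-")  # e.g. as read from a ClusterNames.RData file
+#   plot.row      <- match(cluster.names, orden)              # c(3,1,4,2): cluster 1 is drawn in row 3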
+save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... + + if(composition == "none") { + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + } + + if(composition == "variance"){ + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + period <- 1:12 + } + + if(composition == "psl.rean") { + ordering <- TRUE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering <- FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "corr.matrix") { + ordering <- FALSE # set it to TRUE if you want to see the correlation matrix of the ordered clusters instead!!! + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + plot.new() + } + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! 
+
+ for(lead.month in lead.months){
+   #lead.month=lead.months[1] # for the debug
+
+   if(composition == "simple" || composition == 'edpr'){
+     if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+     if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+   }
+
+   if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+     period <- forecast.month - lead.month
+     if(period < 1) period <- period + 12
+   }
+
+   for(p in period){
+     #p <- period[1] # for the debug
+     p.orig <- p
+
+     if(fields.name == rean.name) print(paste("p =",p))
+     if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+     # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+     impact.data <- FALSE
+     if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+       load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+       if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+       if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+       if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+       # load impact data only if it is available; if not, it will leave an empty space in the composition:
+       if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+         impact.data <- TRUE
+         print(paste0("Impact data for variable ",var.name[var.num]," available for reanalysis ", rean.name))
+         load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+       } else {
+         impact.data <- FALSE
+         print(paste0("Impact data for variable ",var.name[var.num]," not available for reanalysis ", rean.name))
+       }
+
+       if(composition == "variance"){
+         my.cluster2 <- my.cluster # create a copy of my.cluster
+
+         ss1 <- which(my.cluster$cluster == 1)
+         ss2 <- which(my.cluster$cluster == 2)
+         ss3 <- which(my.cluster$cluster == 3)
+         ss4 <- which(my.cluster$cluster == 4)
+
+         withinss <- my.cluster$withinss
+         max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE))
+         withinss[max1] <- NA
+
+         max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE))
+         withinss[max2] <- NA
+
+         max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE))
+         withinss[max3] <- NA
+
+         max4 <- which(!is.na(withinss))
+         rm(withinss)
+
+         max.seq <- c(max1, max2, max3, max4)
+
+         assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+)
+         assign(paste0("cluster",max2,".name"), orden[2])
+         assign(paste0("cluster",max3,".name"), orden[3])
+         assign(paste0("cluster",max4,".name"), orden[4])
+
+       }
+     }
+
+     if(fields.name == forecast.name && n.map < 7){
+       load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
+       if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+         impact.data <- TRUE
+         print("Impact data available for forecasts")
+         if((composition == "simple" || composition == "impact"))
load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... 
, day1month2member1, day2month2member1, ,,, + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrities the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1 
+       }
+       if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+         transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+         j2o <- j2o + 1
+       }
+       if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+         transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+         j3o <- j3o + 1
+       }
+       if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+         transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+         j4o <- j4o + 1
+       }
+
+     }
+
+     trans.obs <- matrix(NA, 4, 4, dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+     trans.obs[1,2] <- length(which(transition.obs1 == 2))
+     trans.obs[1,3] <- length(which(transition.obs1 == 3))
+     trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+     trans.obs[2,1] <- length(which(transition.obs2 == 1))
+     trans.obs[2,3] <- length(which(transition.obs2 == 3))
+     trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+     trans.obs[3,1] <- length(which(transition.obs3 == 1))
+     trans.obs[3,2] <- length(which(transition.obs3 == 2))
+     trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+     trans.obs[4,1] <- length(which(transition.obs4 == 1))
+     trans.obs[4,2] <- length(which(transition.obs4 == 2))
+     trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+     trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+     for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+     # compute the observed interannual monthly variability:
+     freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+     for(y in year.start:year.end){
+       # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+       freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+       freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+       freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+       freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+     }
+
+     sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+     sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+     sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+     sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+     wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+     regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+     if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+     if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+     if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+     if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+     cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+     cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+     cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+     cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+     cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 
<- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month 
== 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # 
ripristinate right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData") + if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check if file exists or not + load(ClusterName.file) # load cluster names + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc. + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + 
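+     # Illustrative aside (not part of the original script): the assign(paste0(...)) calls
+     # above scatter per-regime objects into numbered variables (persist1..persist4, etc.)
+     # keyed by plotting position. An equivalent pattern that is easier to audit keeps
+     # them in a list named after the regimes (hypothetical sketch):
+     #   persist <- setNames(vector("list", 4), orden)
+     #   persist[[cluster1.name]] <- pers1    # and likewise for clusters 2-4
+     #   persist[["NAO+"]]                    # then retrieve by regime name, not by position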
+ clusters.all <- c(cluster1, cluster2, cluster3, cluster4)
+ ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4))
+
+ assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence]))
+ assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence]))
+ assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence]))
+ assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence]))
+
+ cluster1.obs <- which(orden == regimes.obs[1])
+ cluster2.obs <- which(orden == regimes.obs[2])
+ cluster3.obs <- which(orden == regimes.obs[3])
+ cluster4.obs <- which(orden == regimes.obs[4])
+
+ clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs)
+ ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4))
+
+ assign(paste0("freObs",cluster1.obs), wr1y.obs)
+ assign(paste0("freObs",cluster2.obs), wr2y.obs)
+ assign(paste0("freObs",cluster3.obs), wr3y.obs)
+ assign(paste0("freObs",cluster4.obs), wr4y.obs)
+
+ assign(paste0("persistObs",cluster1.obs), persObs1)
+ assign(paste0("persistObs",cluster2.obs), persObs2)
+ assign(paste0("persistObs",cluster3.obs), persObs3)
+ assign(paste0("persistObs",cluster4.obs), persObs4)
+
+ assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs]))
+
+ }
+
+ # position of the longitude values of Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR
+ # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+ #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+ #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+ #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR
+ if(var.name[var.num] == "sfcWind") {
+ my.brks.var <- seq(-3,3,0.5)
+ values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+ }
+ if(var.name[var.num] == "tas") {
+ my.brks.var
<- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", 
continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+
+ ## # for the debug:
+ ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+ ## mes <- p
+ ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+ ## #plot(1981:2015,fre.obs,type="l")
+ ## #lines(1981:2015,fre1,type="l",col="red")
+ ## #lines(1981:2015,fre2,type="l",col="blue")
+ ## #lines(1981:2015,fre4,type="l",col="green")
+ ## cor1 <- cor(fre.obs, fre1)
+ ## cor2 <- cor(fre.obs, fre2)
+ ## cor3 <- cor(fre.obs, fre3)
+ ## cor4 <- cor(fre.obs, fre4)
+ ## round(c(cor1,cor2,cor3,cor4),2)
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.30
+ legend1.cex <- 2
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Subtitle centroid maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.02
+ title1.width <- 0.4
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+ mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos +
title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + 
mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
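+ # (Note on the labels further below: paste0() output cannot be used directly
+ #  inside a plotmath expression, so the mean-frequency labels are built with
+ #  bquote(), whose .() slot evaluates its contents and splices the result into
+ #  the expression. A stand-alone sketch with a hypothetical frequency series f:
+ #    f <- runif(30)
+ #    mtext(bquote(bar(nu) == .(paste0(round(mean(100*f),1),"%"))))
+ #  f is illustrative only.)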
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + if(composition == "edpr"){ + + # adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)+1) # blue--white--red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)+1) # blue--white--red colors + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", 
continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+
+ ## # for the debug:
+ ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+ ## mes <- p
+ ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+ ## #plot(1981:2015,fre.obs,type="l")
+ ## #lines(1981:2015,fre1,type="l",col="red")
+ ## #lines(1981:2015,fre2,type="l",col="blue")
+ ## #lines(1981:2015,fre4,type="l",col="green")
+ ## cor1 <- cor(fre.obs, fre1)
+ ## cor2 <- cor(fre.obs, fre2)
+ ## cor3 <- cor(fre.obs, fre3)
+ ## cor4 <- cor(fre.obs, fre4)
+ ## round(c(cor1,cor2,cor3,cor4),2)
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.30
+ legend1.width <- 0.40
+ legend1.cex <- 2
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Subtitle centroid maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.3
+ title1.width <- 0.44
+ par(fig=c(title1.xpos,
title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, 
cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
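+ # (On the legend colors used in this 'edpr' block: my.cols and my.cols.var were
+ #  rebuilt above with length(my.brks)+1 colors, two more than the
+ #  length(my.brks)-1 interior intervals, so that cols=my.cols[2:(length(my.cols)-1)]
+ #  leaves the first and last colors free for the out-of-range triangles.
+ #  A quick check with hypothetical breaks:
+ #    brks.demo <- seq(-3, 3, 0.5)                                  # 13 breaks -> 12 intervals
+ #    cols.demo <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(brks.demo)+1)
+ #    length(cols.demo[2:(length(cols.demo)-1)])                    # 12, one color per interval
+ #  brks.demo and cols.demo are illustrative only.)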
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'psl'
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered"){
+
+ # Centroid maps:
+ n.map <- n.map + 1 # n.maps starts from 0
+ map.width <- 0.08
+ map.xpos <- 0 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+
+ } # close if on composition == 'psl.rean' || 'psl.rean.unordered'
+
+
+ if(composition == "corr.matrix"){
+
+ # matrix correlation with DJF regimes:
+ cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean
+
+ rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+
+ load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # Load mean slp DJF data from the same reanalysis
+ load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also reanalysis DJF regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2
<- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("psl.ordered",cluster1), pslwr1mean) + assign(paste0("psl.ordered",cluster2), pslwr2mean) + assign(paste0("psl.ordered",cluster3), pslwr3mean) + assign(paste0("psl.ordered",cluster4), pslwr4mean) + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden)) + cluster.corr[1,1] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered1)) + cluster.corr[1,2] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered2)) + cluster.corr[1,3] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered3)) + cluster.corr[1,4] <- cor(as.vector(cluster1.monthly), as.vector(psl.ordered4)) + cluster.corr[2,1] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered1)) + cluster.corr[2,2] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered2)) + cluster.corr[2,3] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered3)) + cluster.corr[2,4] <- cor(as.vector(cluster2.monthly), as.vector(psl.ordered4)) + cluster.corr[3,1] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered1)) + cluster.corr[3,2] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered2)) + cluster.corr[3,3] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered3)) + cluster.corr[3,4] <- cor(as.vector(cluster3.monthly), as.vector(psl.ordered4)) + cluster.corr[4,1] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered1)) + cluster.corr[4,2] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered2)) + cluster.corr[4,3] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered3)) + cluster.corr[4,4] <- cor(as.vector(cluster4.monthly), as.vector(psl.ordered4)) + + #cluster.corr2 <- t(cluster.corr) + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + print(paste0("map.xpos= ", map.xpos)) + + par(fig=c(0, 1, 0, 1), new=TRUE) + text.cex <- 2 + text.ypos <- 1.03 + text(x=map.xpos - 0.015, y=text.ypos - 0.02, labels="cl1", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.04, labels="cl2", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.06, labels="cl3", cex=text.cex) + text(x=map.xpos - 0.015, y=text.ypos - 0.08, labels="cl4", cex=text.cex) + text(x=map.xpos + 0.000, y=text.ypos + 0.00, labels="NAO+", cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos + 0.00, labels="NAO-", cex=text.cex) + text(x=map.xpos + 0.030, y=text.ypos + 0.00, labels="BLO", cex=text.cex) + text(x=map.xpos + 0.045, y=text.ypos + 0.00, labels="ATL", cex=text.cex) + + text(x=map.xpos + 0.00, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,1], cex=text.cex) + text(x=map.xpos + 0.00, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,1], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,2], cex=text.cex) + text(x=map.xpos + 0.015, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,2], cex=text.cex) + text(x=map.xpos + 0.03, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,3], cex=text.cex) + text(x=map.xpos + 0.03, 
y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,3], cex=text.cex)
+ text(x=map.xpos + 0.03, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,3], cex=text.cex)
+ text(x=map.xpos + 0.03, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,3], cex=text.cex)
+ text(x=map.xpos + 0.045, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,4], cex=text.cex)
+ text(x=map.xpos + 0.045, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,4], cex=text.cex)
+ text(x=map.xpos + 0.045, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,4], cex=text.cex)
+ text(x=map.xpos + 0.045, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,4], cex=text.cex)
+
+ ## Centroid maps:
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ #PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ } # close if on corr.matrix
+
+
+ # Visualize the composition of the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.maps starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
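+ # (The EU index used in the impact maps selects the longitudes from the grid
+ #  origin up to lon.max plus the tail of the grid from 337 degrees onward, so
+ #  the plotted window wraps across the Greenwich meridian. A self-contained
+ #  illustration with a hypothetical 1-degree grid:
+ #    lon.demo <- 0:359; lon.max.demo <- 45
+ #    EU.demo <- c(1:which(lon.demo >= lon.max.demo)[1], which(lon.demo >= 337)[1]:length(lon.demo))
+ #    range(lon.demo[EU.demo])   # 0 and 359: the window spans 337..359 plus 0..45
+ #  lon.demo and lon.max.demo are illustrative only.)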
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'impact'
+
+ # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month:
+ if(composition == "single.impact"){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000)
+
+ plot.new()
+
+ par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE)
+ mtext("NAO+",cex=5)
+ par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE)
+ PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE)
+ mtext("NAO-",cex=5)
+ par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE)
+ PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE)
+ mtext("Blocking",cex=5)
+ par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE)
+ PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE)
+ mtext("Atlantic Ridge",cex=5)
+ par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE)
+ PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE)
+ ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3)
+ par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE)
+ mtext("m/s",cex=3)
+
+ dev.off()
+
+ # format it manually from the bash shell (you must run it from your workstation until the identity command of ImageMagick is available on the cluster):
+ #sh ~/scripts/fig2catalog.sh
-x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png
+
+
+ ### to plot the impact map of all regimes on a particular month:
+ #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100
+ year.test <- 2016
+ pos.year.test <- year.test - year.start + 1
+ imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test]
+ #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032)
+ par(fig=c(0, 1, 0.05, 1), new=TRUE)
+ PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ # vector with the frequency of the WRs in the chosen month and year:
+ wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test])
+
+ ## # or save them as individual maps:
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+ }
+
+ # Visualize the regime patterns for a selected forecasted month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols,
sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", 
mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. 
already refer to the regimes in the same order as listed in the 'orden' vector)
+ if(fields.name == forecast.name) {
+ if (composition != "impact") {
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ } else { # also save the impX objects
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, cor.imp1, cor.imp2, cor.imp3, cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ }
+ }
+
+ } # close 'p' on 'period'
+
+ if((composition == 'simple' || composition == 'edpr') && as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+ } # close 'lead.month' on 'lead.months'
+
+ if(composition == "psl" || composition == "fre" || composition == "impact"){
+ # Regime names:
+ title1.xpos <- 0.01
+ title1.width <- 0.04
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+ mtext(paste0(regime1.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+ mtext(paste0(regime2.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE)
+ mtext(paste0(regime3.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE)
+ mtext(paste0(regime4.name), font=2, cex=5)
+
+ if(composition == "psl") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ mtext("hPa", cex=legend1.cex*1.3)
+ }
+
+ if(composition == "impact") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ #my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE)
+ ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(var.unit[var.num], cex=legend1.cex*1.3)
+
+ }
+
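+ # NOTE: every panel, title and legend of these compositions is placed with
+ # par(fig=c(xmin, xmax, ymin, ymax), new=TRUE), i.e. at normalized [0,1] device
+ # coordinates of the current page; a minimal, self-contained sketch of the technique
+ # (the file name 'fig_demo.png' is only illustrative, nothing in this script uses it):
+ #   png("fig_demo.png", width=800, height=400)
+ #   plot.new()
+ #   par(fig=c(0, 0.5, 0, 1), new=TRUE); plot(1:10, main="left panel")
+ #   par(fig=c(0.5, 1, 0, 1), new=TRUE); plot(10:1, main="right panel")
+ #   dev.off()
+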
+ dev.off()
+
+ } # close if on composition
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered" || composition == "corr.matrix") dev.off()
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+
+
+#} # close for on forecasts.month
+
+
+
+if(composition == "taylor"){
+ library("plotrix")
+
+ fields.name <- "ERA-Interim"
+
+ # choose a reference season from 13 (Winter) to 16 (Autumn):
+ season.ref <- 13
+
+ # set the first WR classification:
+ rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+ # load data of the reference season of the first classification (this line should be modified to load the chosen season):
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ #cluster1.ref <- cluster1.name.period[13]
+ #cluster2.ref <- cluster2.name.period[13]
+ #cluster3.ref <- cluster3.name.period[13]
+ #cluster4.ref <- cluster4.name.period[13]
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num. 46 the Blocking and Atlantic regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+ # set a second WR classification (e.g. with a running cluster) to contrast with the new one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+ rean2.name <- "ERA-Interim"
+
+ # load data of the reference season of the second classification (this line should be modified to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num. 46 the Blocking and Atlantic regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map to the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ # Plot the Taylor diagrams:
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ # load data from the second classification:
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1.old <- which(orden == cluster1.name)
+ cluster2.old <- which(orden == cluster2.name)
+ cluster3.old <- which(orden == cluster3.name)
+ cluster4.old <- which(orden == cluster4.name)
+
+ assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+ assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+ assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+ assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+ add.mod <- ifelse(p == 1, FALSE, TRUE)
+ main.mod <- ifelse(p == 1, orden[regime],"")
+
+ color.first <- "red"
+ color.second <- "blue" # "black"
+ sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work!
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+
+ # add the points from the second classification:
+ add.mod <- TRUE
+ #add.mod <- ifelse(p == 1, FALSE, TRUE)
+
+ if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+
+ if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) }
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # arrays storing the indicators in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4))
+ array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # 
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
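+ # NOTE: the transSimX / transObsX vectors are assumed to hold, for origin regime X, the
+ # transition frequencies towards the four regimes in the fixed order NAO+, NAO-, Blocking,
+ # Atl.Ridge (e.g. transObs2[3] below is the share of NAO- days followed by Blocking);
+ # the self-transition slot is left NA because persistence is reported separately (persistX).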
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ # assign one color per correlation bin, scanning the breaks from the highest down:
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean observed frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequency differences:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[8]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[7]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[6]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[5]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[4]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[3]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[2]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime frequency (in %)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime frequency (in %)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime frequency (in %)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
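+ # In the *_target_month figures above, the x axis is labelled 6:0, so the lead
+ # time is 6-j while i is the target month; i2 = i - (6-j) is then the startdate
+ # month, wrapped back into 1..12. The same wrap with modular arithmetic would be:
+ # ((i - (6-j) - 1) %% 12) + 1   # e.g. target Feb (i=2) at lead 4 -> startdate 10 (Oct)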
+
+ # Simulated persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400)
+
+ # create an array similar to array.pers but with colors instead of frequencies:
+ pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+ array.pers.colors <- array(pers.cols[8],c(12,7,4))
+ array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7]
+ array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6]
+ array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5]
+ array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4]
+ array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3]
+ array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2]
+ array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.pers.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.pers.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
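+ # Every summary block repeats the same eight "< my.seq[k]" assignments to bin
+ # values into colors. A hypothetical equivalent built on findInterval() -- a
+ # sketch only (assumes numeric breaks, NA allowed at the two ends), not used here:
+ # bin.colors <- function(x, brks, cols) {
+ #   if(is.na(brks[1])) brks[1] <- -Inf                  # open-ended lower bin
+ #   if(is.na(brks[length(brks)])) brks[length(brks)] <- Inf  # open-ended upper bin
+ #   out <- array(cols[findInterval(x, brks, all.inside=TRUE)], dim=dim(x))
+ #   out[is.na(x)] <- "white"   # as done explicitly for the transition arrays below
+ #   out
+ # }
+ # e.g.: array.pers.colors <- bin.colors(array.pers, my.seq, pers.cols)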
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "days/month", line = 2, cex=1)
+ dev.off()
+
+
+ # Observed persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400)
+
+ # create an array similar to array.pers.obs but with colors instead of frequencies:
+ pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA)
+
+ array.pers.colors <- array(pers.cols[8],c(12,7,4))
+ array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7]
+ array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6]
+ array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5]
+ array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4]
+ array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3]
+ array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2]
+ array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Observed regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.pers.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.pers.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.pers.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Observed regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "days/month", line = 2, cex=1)
+ dev.off()
+
+
+ # Bias persistence summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400)
+
+ # create an array similar to array.diff.pers but with colors instead of persistence differences:
+ diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA)
+
+ array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4))
+ array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7]
+ array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6]
+ array.diff.pers.colors[array.diff.pers < my.seq[6]] <- diff.pers.cols[5]
+ array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4]
+ array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3]
+ array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2]
+ array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.pers.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "days/month", line = 2, cex=1)
+ dev.off()
+
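+ # Each *_startdate panel above repeats the same plot/mtext/axis boilerplate;
+ # a hypothetical wrapper (a sketch only, not used in this script) could be:
+ # startdate.panel <- function(title.txt, fig.title, fig.panel, cols) {
+ #   par(mar=c(0,0,4,0), fig=fig.title, new=TRUE); mtext(title.txt, cex=1.5)
+ #   par(mar=c(0,4,1,0), fig=fig.panel, new=TRUE)
+ #   plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ #   mtext(side=1, text="Startdate", line=4, cex=1.5)
+ #   mtext(side=2, text="Lead time", line=2.5, cex=1.5)
+ #   axis(1, at=1:12, las=2, labels=my.month.short, cex.axis=1.5)
+ #   axis(2, at=0:6, las=2, labels=0:6, cex.axis=1.5)
+ #   for(p in 1:12) for(l in 0:6) polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=cols[p,1+l])
+ # }
+ # e.g.: startdate.panel("NAO+", c(0,0.45,0.85,0.98), c(0,0.45,0.6,0.9), array.diff.pers.colors[,,1])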
+
+ # St.Dev.freq ratio summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+
+ # create an array similar to array.diff.sd.freq but with colors instead of ratio values:
+ diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+
+ array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4))
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2]
+ array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("St.Dev. ratio between sim. and obs. average frequencies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Impact summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400)
+
+ # create an array similar to array.imp but with colors instead of correlation values:
+ imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.imp.colors <- array(imp.cols[8],c(12,7,4))
+ array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7]
+ array.imp.colors[array.imp < my.seq[7]] <- imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.imp.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.imp.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Simulated Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.trans.sim1 but with colors instead of transition probabilities:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400)
+
+ # create an array similar to array.trans.obs1 but with colors instead of transition probabilities:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+
+
+ # Simulated transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
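+
+ # --- Editor's note: each cell of the summary charts is split into four triangles
+ # (left, bottom, right, top = the four target regimes). A sketch of a helper that
+ # draws one cell centred at (p, l) with unit width, matching the loops above;
+ # draw.cell() is a hypothetical name, not part of the original script:
+ draw.cell <- function(p, l, cols){
+   polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols[1])  # left triangle
+   polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols[2])  # bottom triangle
+   polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols[3])  # right triangle
+   polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols[4])  # top triangle
+ }
+ # usage inside the double loop above:
+ # draw.cell(p, l, array.trans.sim2.colors[p, 1+l, ])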
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
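+
+ # --- Editor's note: the four panels of every "_startdate" figure below differ only
+ # in their fig coordinates, panel title and the slice of the colour array they draw.
+ # A sketch of a panel helper (hypothetical name, not in the original script); it
+ # assumes my.month.short is defined as in the rest of the script:
+ startdate.panel <- function(panel.title, fig.head, fig.body, cols.slice){
+   par(mar=c(0,0,4,0), fig=fig.head, new=TRUE)
+   mtext(panel.title, cex=1.5)
+   par(mar=c(0,4,1,0), fig=fig.body, new=TRUE)
+   plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+   mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+   mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+   axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+   axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+   # one unit square per (startdate, lead time) cell, as in the loops below:
+   for(p in 1:12){ for(l in 0:6){ polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=cols.slice[p,1+l]) }}
+ }
+ # usage, equivalent to the NAO+ panel of the next figure:
+ # startdate.panel("NAO+", c(0, 0.45, 0.85, 0.98), c(0, 0.45, 0.6, 0.9), array.trans.obs2.colors[,,1])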
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Simulated transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from blocking regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
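+
+ # --- Editor's note: from the first summary block onwards, the same three figures
+ # (summary, "_startdate", "_target_month") are repeated for each source regime and
+ # for the simulated, observed and bias arrays. A sketch of how the file names could
+ # be generated in one place (hypothetical driver; the plotting calls themselves,
+ # e.g. the helpers sketched above, are omitted):
+ regime.names <- c("NAO+", "NAO-", "blocking", "Atlantic_ridge")
+ for(r in seq_along(regime.names)){
+   fname.base <- paste0(work.dir, "/", forecast.name, "_summary_transition_prob_", regime.names[r])
+   fig.files <- paste0(fname.base, c(".png", "_startdate.png", "_target_month.png"))
+   # ...open each file with png(), draw the corresponding figure, then dev.off()
+ }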
+
+
+ # Observed transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from blocking regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Simulated transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from Atlantic ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Observed transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from Atlantic ridge regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
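+
+ # --- Editor's note: the bias blocks reuse the same binning cascade, only with the
+ # signed breaks my.seq = c(-20,...,20). The prob2col() sketch given earlier covers
+ # this case too, since it clamps values outside the first/last break to the end
+ # colours exactly as the cascade of "<" assignments does:
+ # array.trans.diff1.colors <- prob2col(array.trans.diff1, my.seq, trans.cols)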
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO- regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
+
+ # Bias of the transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from blocking regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2,
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2])
+ ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3])
+ ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4])
+ ## }
+ ## }
+
+ ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+ ## dev.off()
+
+} # close if on composition == "summary"
+
+
+
+# impact map of the strongest WR:
+if(composition == "impact.highest" && fields.name == rean.name){
+
+ var.num <- 2 # choose a variable (1: sfcWind, 2: tas)
+ index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively
+
+ if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650)
+ if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650)
+ if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650)
+
+ plot.new()
+ #par(mfrow=c(4,3))
+ #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+ col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+ # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ lon.max = 45
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+ cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+ cluster2 <- which(orden == cluster2.name.period[p])
+ cluster3 <- which(orden == cluster3.name.period[p])
+ cluster4 <- which(orden == cluster4.name.period[p])
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ # most influential WT:
+ if (index == 1) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+ if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+ if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+ if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
+ # most influential WT with positive impact:
+ if (index == 2) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+
+ }
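+
+ # Editor's sketch (kept commented out, in the same spirit as the other commented
+ # alternatives in this script): the per-cell if-chains above can be written more
+ # compactly by stacking the four impact maps and taking which.max()/which.min()
+ # along the new dimension. 'imp.stack' and 'imp.all.alt' are illustrative names only,
+ # assuming imp1..imp4 share the same lat x lon dimensions. Note one small difference:
+ # which.max()/which.min() break ties with the first extreme, while the if-chains
+ # above keep the last one.
+ # imp.stack <- array(c(imp1, imp2, imp3, imp4), c(dim(imp1), 4))
+ # if (index == 1) imp.all.alt <- apply(abs(imp.stack), c(1,2), which.max) + 0.5
+ # if (index == 2) imp.all.alt <- apply(imp.stack, c(1,2), which.max) + 0.5
+ # if (index == 3) imp.all.alt <- apply(imp.stack, c(1,2), which.min) + 0.5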
+ # most influential WT with negative impact:
+ if (index == 3) {
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+ if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+ if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+ if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+ }
+
+ if(p<=3) yMod <- 0.7
+ if(p>=4 && p<=6) yMod <- 0.5
+ if(p>=7 && p<=9) yMod <- 0.3
+ if(p>=10) yMod <- 0.1
+ if(p==13 || p==14) yMod <- 0.52
+ if(p==15 || p==16) yMod <- 0.1
+
+ if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+ if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+ if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+ if(p==13 || p==15) xMod <- 0.05
+ if(p==14 || p==16) xMod <- 0.50
+
+ # impact map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+ if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+ PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+ # title map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+ if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+ mtext(my.period[p], font=2, cex=1.5)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+ par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+ if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+ if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+
+} # close if on composition == "impact.highest"
+
+
+
+if(monthly_anomalies) {
+ # Compute the climatology and anomalies (with a LOESS filter) for the sfcWind data, and draw monthly anomalies, in case you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis:
+
+ load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end
+
+ sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+ sfcWind <- sfcWind366[,,,1:365,,]
+
+ for(y in year.start:year.end){
+ y2 <- y - year.start + 1
+ if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period, removing the 29th of February
+ }
+
+ rm(sfcWind366)
+ gc()
+
+ sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+ sfcWindClimLoess <- sfcWindClimDaily
+ for(i in n.pos.lat){
+ for(j in n.pos.lon){
+ my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ sfcWindClimLoess[,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(sfcWindClimDaily)
+ gc()
+
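+ # Editor's note: the block above derives the daily climatology in two steps: it averages
+ # each of the 365 calendar days across all years, then smooths the resulting annual cycle
+ # with a LOESS fit (span=0.35), so the anomalies computed below are taken with respect to
+ # a noise-free seasonal cycle. A minimal self-contained illustration of the smoothing step,
+ # with made-up data ('demo.*' names are hypothetical), kept commented out:
+ # demo.clim <- sin(2*pi*(1:365)/365) + rnorm(365, sd=0.3) # noisy mean annual cycle
+ # demo.fit <- loess(y ~ day, data.frame(y=demo.clim, day=1:365), span=0.35)
+ # plot(1:365, demo.clim, type="l"); lines(1:365, predict(demo.fit), lwd=2) # smooth curve on top
+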
sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), 
col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)]) + + ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("m/s", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + # you could almost use the normal PlotEquiMap funcion below, it only needs to set the black line for anomaly=0 and decrease the size of the contour labels: + # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5) + + ##my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.37, 1, 0, 0.09), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + #mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + 
if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+
+ sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE)
+ sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE)
+ sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE)
+ sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE)
+
+ # load the regime names of the month being plotted:
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData"))
+
+ ## add strip with the daily sequence of WRs:
+
+ mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name))
+ mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name))
+ mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name))
+ mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name))
+
+ cluster1.name.short <- substr(cluster1.name,1,1)
+ cluster2.name.short <- substr(cluster2.name,1,1)
+ cluster3.name.short <- substr(cluster3.name,1,1)
+ cluster4.name.short <- substr(cluster4.name,1,1)
+
+ ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime:
+ if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1)
+ if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2)
+ if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3)
+ if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4)
+
+ c1 <- which(cluster.test == 1)
+ c2 <- which(cluster.test == 2)
+ c3 <- which(cluster.test == 3)
+ c4 <- which(cluster.test == 4)
+
+ cluster.test.letters <- cluster.test
+ cluster.test.letters[c1] <- cluster1.name.short
+ cluster.test.letters[c2] <- cluster2.name.short
+ cluster.test.letters[c3] <- cluster3.name.short
+ cluster.test.letters[c4] <- cluster4.name.short
+
+ cluster.col <- cluster.test.letters
+ cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1"
+ cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1"
+ cluster.col[which(cluster.test.letters == "B")] <- "White"
+ cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1"
+
+ orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("fre.days",cluster1), fre1.days)
+ assign(paste0("fre.days",cluster2), fre2.days)
+ assign(paste0("fre.days",cluster3), fre3.days)
+ assign(paste0("fre.days",cluster4), fre4.days)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ assign(paste0("imp.test",cluster1), sfcWindMean1)
+ assign(paste0("imp.test",cluster2), sfcWindMean2)
+ assign(paste0("imp.test",cluster3), sfcWindMean3)
+ assign(paste0("imp.test",cluster4), sfcWindMean4)
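+
+ # Editor's note: the assign() calls above re-index the per-cluster statistics so that
+ # object 1 always refers to orden[1] ("NAO+"), object 2 to orden[2], and so on, whatever
+ # arbitrary numbering the clustering gave. A toy illustration (hypothetical values): if
+ # cluster1.name == "Blocking", then which(orden == "Blocking") is 3, so
+ # assign(paste0("imp.test", 3), sfcWindMean1) stores the first cluster's map as 'imp.test3',
+ # the slot that the plotting code below reserves for the Blocking panel.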
+
+ # psl anomaly for the chosen year, month and cluster:
+ slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+ slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+ slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+ slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+ # in case a regime has NO days in that month, we must set it to NA:
+ if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+ if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+
+ slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE)
+ slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE)
+ slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE)
+ slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE)
+
+ assign(paste0("psl.test",cluster1), slpMean1)
+ assign(paste0("psl.test",cluster2), slpMean2)
+ assign(paste0("psl.test",cluster3), slpMean3)
+ assign(paste0("psl.test",cluster4), slpMean4)
+
+ # save the strip with the daily regime series for the chosen month and year:
+ fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png")
+ png(filename=fileoutput.seq,width=1500,height=1850)
+
+ plot.new()
+
+ sep <- 0.03
+ for(day in 1:n.days.in.a.period(month.test, 2001)){
+ sep.cum <- (day-1)*sep
+ polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day])
+ text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5)
+ text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=cluster.test.letters[day], cex=2)
+ }
+
+ dev.off()
+
+ # save the average impact and sea level pressure only for the chosen month and year:
+ fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png")
+
+ png(filename=fileoutput.test,width=1500,height=2000)
+
+ plot.new()
+
+ par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE)
+ PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+ par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE)
+ PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+ par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE)
+ PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+ par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE)
+ PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+
+ par(fig=c(0,0.33,0.955,0.975), new=TRUE)
+ mtext(paste0(orden[1], " wind speed anomaly "), font=2, cex=2)
+ par(fig=c(0,0.33,0.725,0.745), new=TRUE)
+ mtext(paste0(orden[2], " wind speed anomaly "), font=2, cex=2)
+ par(fig=c(0,0.33,0.495,0.515), new=TRUE)
+ mtext(paste0(orden[3], " wind speed anomaly "), font=2, cex=2)
+ par(fig=c(0,0.33,0.265,0.285), new=TRUE)
+ mtext(paste0(orden[4], " wind speed anomaly "), font=2, cex=2)
+
+ par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE)
+ #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+ ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8,
bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part 
of the strip
+ system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both))
+
+ ## same image formatted for the catalogue:
+ fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+ system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 70 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test))
+
+ system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test))
+
+ } # close for on month.test
+ } # close for on year.test
+
+} # close if on monthly_anomalies
+
diff --git a/old/weather_regimes_maps_v26.R~ b/old/weather_regimes_maps_v26.R~
new file mode 100644
index 0000000000000000000000000000000000000000..ab7c29b901d7574662fc52152a98a9e810f819bf
--- /dev/null
+++ b/old/weather_regimes_maps_v26.R~
@@ -0,0 +1,5793 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# This script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: you need to have previously created the '_psl.RData' files, which are the output of 'weather_regimes.R', for each period you want to visualize.
+# If your regimes derive from a reanalysis, this script must be run twice:
+# first, one month/season at a time, with 'period=X' (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"',
+# inserting the regime names 'cluster1.name=...' in the correct order; in this first run, you only save the ordered cartography.
+# You already have to know the right regime order, by taking a look at the output maps (_clusterX.png) of weather_regimes.R.
+# After that, any time you run this script, remember to set:
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean"
+#
+# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in future or create new ones based on the saved data.
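+#
+# A condensed example of the workflow just described (the regime names below are
+# illustrative; use the order you actually see in the '_clusterX.png' maps):
+#   first run, per period:  cluster1.name <- "Blocking"; cluster2.name <- "NAO-"
+#                           cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"
+#                           ordering = TRUE; save.names = TRUE; composition = "none"
+#   later runs:             ordering = TRUE; save.names = FALSE; any composition you need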
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis regimes
+# can be used to automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+#library("corrplot")
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days"
+
+rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+# Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name'):
+fields.name <- rean.name #forecast.name
+
+composition <- "psl.rean.unordered" # choose which kind of composition you want to plot:
+    # 'none' doesn't plot anything; it only saves the ClusterNames.RData files for each period selected, overwriting any pre-existing ones
+    # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+    # 'psl' for all the regime anomalies for a fixed forecast month
+    # 'fre' for all the interannual frequencies for a fixed forecast month
+    # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+    # 'summary' for the summary plots of all forecast months (persistence bias, freq. bias, correlations, etc.) [you need all the ClusterNames files]
+    # 'impact.highest' for all the impact plots of the regime with the highest impact
+    # 'single.impact' to save the four impact maps in a 2x2 composition
+    # 'single.psl' to save the individual psl map
+    # 'single.fre' to save the individual fre map
+    # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+    # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps
+    # 'psl.rean' for all the regime anomalies for all months of a reanalysis
+    # 'psl.rean.unordered': as before, but without ordering clusters
+    # 'corr.matrix': as before, but plots the correlation matrix only
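+    # For instance (editor's note; the file name pattern is taken from the png() calls
+    # further down in this script):
+    # composition <- "psl.rean"  # -> one panel figure: <rean.dir>/<fields.name>_reanalysis_all_months.png
+    # composition <- "summary"   # -> requires the ClusterNames .RData files of all periods to exist beforehand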
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot
+
+# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12
+
+# Associate the regimes to the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster3.name <- "Blocking"
+cluster4.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+####### if monthly_anomalies <- TRUE, you have to specify these additional parameters:
+monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below
+year.chosen <- 2016
+month.chosen <- 1:12
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.data <- JRA55 # choose one of the above reanalyses
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+    # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a closing } at the end of the script
+    # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterNames.RData files!!!
+#for(forecast.month in 1:12){
+
+####### Derived variables ###############################################################################################################################################
+
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
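+# How the two flags above and below combine (from the usage notes at the top of this file):
+#   ordering = TRUE,  save.names = TRUE   -> first run: fixes the cluster order and writes the ClusterNames.RData files
+#   ordering = TRUE,  save.names = FALSE  -> later runs: reuses the stored order (safe for any composition)
+#   ordering = FALSE                      -> clusters keep their raw numbering (e.g. composition = "psl.rean.unordered")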
+save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 +################################################################################################################################################################### + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... + + if(composition == "none") { + ordering = TRUE + save.names = TRUE + as.pdf = FALSE + } + + if(composition == "psl.rean") { + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering=FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "corr.matrix") { + ordering=FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + plot.new() + } + + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! 
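+ # Editor's note: for forecast compositions the start month is derived from the target month
+ # and the lead time, wrapping across the year boundary (see 'period <- forecast.month -
+ # lead.month; if(period < 1) period <- period + 12' inside the loop below). For example,
+ # forecast.month = 1 (January) at lead.month = 2 maps to period = 11, i.e. a November start.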
+
+ for(lead.month in lead.months){
+ #lead.month=lead.months[1] # for the debug
+
+ if(composition == "simple" || composition == 'edpr'){
+ if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+ if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+ }
+
+ if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+ period <- forecast.month - lead.month
+ if(period < 1) period <- period + 12
+ }
+
+ impact.data <- FALSE
+ for(p in period){
+ #p <- period[1] # for the debug
+ p.orig <- p
+
+ if(fields.name == rean.name) print(paste("p =",p))
+ if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+ # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+ if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+ # load impact data only if it is available; if not, it will leave an empty space in the composition:
+ if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+ impact.data <- TRUE
+ print("Impact data available for reanalysis")
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ } else {
+ impact.data <- FALSE
+ print("Impact data for reanalysis not available")
+ }
+
+ }
+
+ if(fields.name == forecast.name && n.map < 7){
+ load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
+ if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+ impact.data <- TRUE
+ print("Impact data available for forecasts")
+ if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps
+ } else {
+ impact.data <- FALSE
+ print("Impact data for forecasts not available")
+ }
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ }
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ if(fields.name == forecast.name && n.map < 7){
+
+ # compute the simulated interannual monthly variability:
+ n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
+ freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+ freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+ freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+ freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+ sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+ sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+ sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+ sd.freq.sim4 <- sd(freq.sim4) / n.days.month
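+
+ # Counting sketch (hypothetical toy array: 3 days x 2 years x 2 members) of the
+ # apply() calls above: summing a logical array over dims c(2,3) gives, for each
+ # year and member, the number of days assigned to a given cluster:
+ # toy <- array(sample(1:4, 12, TRUE), c(3, 2, 2))
+ # apply(toy == 1, c(2, 3), sum) # 2x2 matrix of day counts in cluster 1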
+
+ # compute the transition probabilities:
+ j1 <- j2 <- j3 <- j4 <- 1
+ transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+ n.members <- dim(my.cluster.array[[p]])[3]
+
+ for(m in 1:n.members){
+ for(y in 1:n.years){
+ for (d in 1:(n.days.month-1)){
+
+ if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){
+ transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+ j1 <- j1 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){
+ transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+ j2 <- j2 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){
+ transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+ j3 <- j3 + 1
+ }
+ if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){
+ transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+ j4 <- j4 + 1
+ }
+
+ }
+ }
+ }
+
+ trans.sim <- matrix(NA, 4, 4, dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+ trans.sim[1,2] <- length(which(transition1 == 2))
+ trans.sim[1,3] <- length(which(transition1 == 3))
+ trans.sim[1,4] <- length(which(transition1 == 4))
+
+ trans.sim[2,1] <- length(which(transition2 == 1))
+ trans.sim[2,3] <- length(which(transition2 == 3))
+ trans.sim[2,4] <- length(which(transition2 == 4))
+
+ trans.sim[3,1] <- length(which(transition3 == 1))
+ trans.sim[3,2] <- length(which(transition3 == 2))
+ trans.sim[3,4] <- length(which(transition3 == 4))
+
+ trans.sim[4,1] <- length(which(transition4 == 1))
+ trans.sim[4,2] <- length(which(transition4 == 2))
+ trans.sim[4,3] <- length(which(transition4 == 3))
+
+ trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+ for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
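+
+ # Compact equivalent (hypothetical daily sequence 'cl') of the transition
+ # counting above: keep only the days where the regime changes, cross-tabulate
+ # origin vs destination, and normalise each row:
+ # cl <- c(1, 1, 2, 3, 3, 1, 4)
+ # ch <- which(diff(cl) != 0)
+ # tr <- table(factor(cl[ch], levels = 1:4), factor(cl[ch + 1], levels = 1:4))
+ # tr / rowSums(tr) # rows without transitions give NaN, as when trans.tot is 0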
+
+ # compute cluster persistence:
+ my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector ordered as: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+
+ sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+ i1 <- i2 <- i3 <- i4 <- 1
+
+ if(my.cluster.vector[1] != 1) i1 <- 0
+ if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[1] != 2) i2 <- 0
+ if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[1] != 3) i3 <- 0
+ if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[1] != 4) i4 <- 0
+ if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+ n.dd <- length(my.cluster.vector)
+
+ for(d in 1:(n.dd-1)){
+ if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+ if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+ if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+ if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+ if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+ if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+ if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+ if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+ }
+
+ if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ pers1 <- mean(sequ1[which(sequ1 != 0)])
+ pers2 <- mean(sequ2[which(sequ2 != 0)])
+ pers3 <- mean(sequ3[which(sequ3 != 0)])
+ pers4 <- mean(sequ4[which(sequ4 != 0)])
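+
+ # A compact alternative to the run-length bookkeeping above is rle(); a sketch
+ # with a hypothetical sequence, for the mean persistence of cluster 1:
+ # r <- rle(c(1, 1, 2, 2, 2, 1, 3))
+ # mean(r$lengths[r$values == 1]) # (2 + 1) / 2 = 1.5 days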
+
+ # compute correlations between the simulated clusters and the observed regimes' anomalies:
+ cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+
+ if(composition == 'impact' && impact.data == TRUE) {
+ cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean
+ #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4
+ }
+
+ lat.forecast <- lat; lon.forecast <- lon
+
+ if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+ load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+ # load reanalysis varwrXmean impact data:
+ if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig,period)) period <- period.orig
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ # compute the observed transition probabilities:
+ n.days.month <- n.days.in.a.period(load.month,2001)
+ j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+ for (d in 1:(length(my.cluster$cluster)-1)){
+ if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+ j1o <- j1o + 1
+ }
+ if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+ j2o <- j2o + 1
+ }
+ if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+ j3o <- j3o + 1
+ }
+ if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+ transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+ j4o <- j4o + 1
+ }
+
+ }
+
+ trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"), c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+ trans.obs[1,2] <- length(which(transition.obs1 == 2))
+ trans.obs[1,3] <- length(which(transition.obs1 == 3))
+ trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+ trans.obs[2,1] <- length(which(transition.obs2 == 1))
+ trans.obs[2,3] <- length(which(transition.obs2 == 3))
+ trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+ trans.obs[3,1] <- length(which(transition.obs3 == 1))
+ trans.obs[3,2] <- length(which(transition.obs3 == 2))
+ trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+ trans.obs[4,1] <- length(which(transition.obs4 == 1))
+ trans.obs[4,2] <- length(which(transition.obs4 == 2))
+ trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+ trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+ for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+ # compute the observed interannual monthly variability:
+ freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+ freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+ freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+ freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+ }
+
+ sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+ sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+ sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+ sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+ wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+ if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+ if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+ if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+ cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+ cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+ cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+ cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 
<- i3+1
+ if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+ if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+ if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+ }
+
+ if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+ if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+ if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+ if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+ persObs1 <- mean(sequ1[which(sequ1 != 0)])
+ persObs2 <- mean(sequ2[which(sequ2 != 0)])
+ persObs3 <- mean(sequ3[which(sequ3 != 0)])
+ persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+ ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+ ### forecast month: September
+ #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+ #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+ #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+ if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+ if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+ if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+ if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+ if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+ if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+ if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+ # regimes.sim # for the debug
+
+ ### forecast month: October
+ #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+ #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+ #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+ #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+ #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+ if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+ if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+ if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+ if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+ if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+
+ ### forecast month: November
+ #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+ #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+ #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+ #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+ regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # simulated regimes after the manual corrections
+ #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as a reference
+
+ order.sim <- match(regimes.sim2, orden)
+ order.obs <- match(regimes.obs, orden)
+
+ clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+ clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+ # recompute the spatial correlations, because they might have changed after the manual corrections above:
+ spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+ spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+ spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+ spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+ if(composition == 'impact' && impact.data == TRUE) {
+ clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp)
+ #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv)
+
+ clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean)
+ #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4)
+
+ cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]]))
+ cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]]))
+ cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]]))
+ cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]]))
+
+ }
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period.orig, period)) period <- period.orig
+ if(p.orig != p) p <- p.orig
+ if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+ } # close for on 'forecast.name'
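+
+ # Sketch of the reordering logic used above (hypothetical names): match() gives
+ # each cluster's rank in 'orden', and which(order == k) retrieves the cluster
+ # occupying rank k:
+ # sim.toy <- c("C", "A", "B"); orden.toy <- c("A", "B", "C")
+ # order.toy <- match(sim.toy, orden.toy) # 3 1 2
+ # which(order.toy == 1) # 2: the second simulated cluster is regime "A"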
+
+ if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+ if(save.names){
+ save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+ } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+ ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")
+ if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check whether the file exists or not
+ load(ClusterName.file) # load cluster names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ }
+ }
+
+ # assign to each cluster its regime:
+ #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+ if(ordering == FALSE){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1])
+ #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2])
+ #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3])
+ #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4])
+
+ if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){
+ assign(paste0("imp",cluster1), varwr1mean)
+ assign(paste0("imp",cluster2), varwr2mean)
+ assign(paste0("imp",cluster3), varwr3mean)
+ assign(paste0("imp",cluster4), varwr4mean)
+
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+ }
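+
+ # The assign(paste0(...)) calls above create map1..map4, fre1..fre4, etc. in
+ # plotting order; a standalone sketch of the indirection (hypothetical object):
+ # k <- 3; assign(paste0("map", k), matrix(0, 2, 2))
+ # identical(map3, get(paste0("map", k))) # TRUE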
+
+ if(fields.name == forecast.name && n.map < 7){
+ assign(paste0("freMax",cluster1), wr1yMax)
+ assign(paste0("freMax",cluster2), wr2yMax)
+ assign(paste0("freMax",cluster3), wr3yMax)
+ assign(paste0("freMax",cluster4), wr4yMax)
+
+ assign(paste0("freMin",cluster1), wr1yMin)
+ assign(paste0("freMin",cluster2), wr2yMin)
+ assign(paste0("freMin",cluster3), wr3yMin)
+ assign(paste0("freMin",cluster4), wr4yMin)
+
+ #assign(paste0("spatial.cor",cluster1), spat.cor1)
+ #assign(paste0("spatial.cor",cluster2), spat.cor2)
+ #assign(paste0("spatial.cor",cluster3), spat.cor3)
+ #assign(paste0("spatial.cor",cluster4), spat.cor4)
+
+ assign(paste0("persist",cluster1), pers1)
+ assign(paste0("persist",cluster2), pers2)
+ assign(paste0("persist",cluster3), pers3)
+ assign(paste0("persist",cluster4), pers4)
+
+ clusters.all <- c(cluster1, cluster2, cluster3, cluster4)
+ ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4))
+
+ assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence]))
+ assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence]))
+ assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence]))
+ assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence]))
+
+ cluster1.obs <- which(orden == regimes.obs[1])
+ cluster2.obs <- which(orden == regimes.obs[2])
+ cluster3.obs <- which(orden == regimes.obs[3])
+ cluster4.obs <- which(orden == regimes.obs[4])
+
+ clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs)
+ ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4))
+
+ assign(paste0("freObs",cluster1.obs), wr1y.obs)
+ assign(paste0("freObs",cluster2.obs), wr2y.obs)
+ assign(paste0("freObs",cluster3.obs), wr3y.obs)
+ assign(paste0("freObs",cluster4.obs), wr4y.obs)
+
+ assign(paste0("persistObs",cluster1.obs), persObs1)
+ assign(paste0("persistObs",cluster2.obs), persObs2)
+ assign(paste0("persistObs",cluster3.obs), persObs3)
+ assign(paste0("persistObs",cluster4.obs), persObs4)
+
+ assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs]))
+ assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs]))
+
+ }
+
+ # positions of the longitude values of Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR
+ # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+ #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+ #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+ #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR
+ if(var.name[var.num] == "sfcWind") {
+ my.brks.var <- seq(-3,3,0.5)
+ values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+ }
+ if(var.name[var.num] == "tas") {
+ my.brks.var <- seq(-5,5,1)
+ values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+ }
+
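+ # Longitude wrap sketch: on a 0..360 grid Europe spans both ends of the lon
+ # axis, hence the two-piece EU index built above; hypothetical 1.5-degree grid,
+ # with 45 standing in for lon.max:
+ # lon.toy <- seq(0, 358.5, 1.5)
+ # EU.toy <- c(1:which(lon.toy >= 45)[1], which(lon.toy >= 337)[1]:length(lon.toy))
+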
#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), 
new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " 
Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + 
barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + if(composition == "edpr"){ + + # adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)+1) # blue--white--red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)+1) # blue--white--red colors + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", 
continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, 
title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, 
cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean"){ + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + #par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + #if(n.map < 8) { + # mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + #} else{ + # mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + #} + + } # close if on composition == 'psl.rean' + + + if(composition == "psl.rean.unordered"){ + + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- 0 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + 
PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+
+
+ } # close if on psl.rean.unordered
+
+
+ if(composition == "corr.matrix"){
+
+ # correlation matrix with the DJF regimes:
+ cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean
+
+ rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+
+ load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # load mean SLP DJF data from the same reanalysis
+ load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also reanalysis DJF regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("psl.ordered",cluster1), pslwr1mean)
+ assign(paste0("psl.ordered",cluster2), pslwr2mean)
+ assign(paste0("psl.ordered",cluster3), pslwr3mean)
+ assign(paste0("psl.ordered",cluster4), pslwr4mean)
+
+
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden))
+ # all 16 pattern correlations between the monthly clusters and the reordered DJF regimes:
+ for(i in 1:4) for(j in 1:4) cluster.corr[i,j] <- cor(as.vector(get(paste0("cluster",i,".monthly"))), as.vector(get(paste0("psl.ordered",j))))
+
+ #cluster.corr2 <- t(cluster.corr)
+
+ # Centroid maps:
+ n.map <- n.map + 1 # n.maps starts from 0
+ map.width <- 0.08
+ map.xpos <- 0 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+ print(paste0("map.xpos= ", map.xpos))
+
+ par(fig=c(0, 1, 0, 1), new=TRUE)
+ text.cex <- 2
+ text.ypos <- 1.03
+ # row labels (one per monthly cluster) and column headers (one per DJF regime):
+ for(i in 1:4) text(x=map.xpos - 0.015, y=text.ypos - 0.02*i, labels=paste0("cl",i), cex=text.cex)
+ col.labels <- c("NAO+","NAO-","BLO","ATL")
+ for(j in 1:4) text(x=map.xpos + 0.015*(j-1), y=text.ypos + 0.00, labels=col.labels[j], cex=text.cex)
+
+ # matrix values, column by column (x step 0.015, y step 0.02):
+ for(j in 1:4) for(i in 1:4) text(x=map.xpos + 0.015*(j-1), y=text.ypos - 0.02*i, labels=round(cluster.corr,2)[i,j], cex=text.cex)
+
+
+ } # close if on corr.matrix
+
+
+ # Visualize the composition of the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Impact maps:
+ n.map <- n.map + 1 # n.maps starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+
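+ # rescale() is defined elsewhere (e.g. in the sourced Rfunctions.R, not shown here);
+ # judging from how it is always called with the first and last break, it presumably
+ # clamps the field to the breaks range before plotting. A hypothetical stand-in,
+ # only to document the assumed behaviour:
+ # rescale <- function(x, my.min, my.max) pmin(pmax(x, my.min), my.max)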
par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, 
intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE)
+ ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3)
+ par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE)
+ mtext("m/s",cex=3)
+
+ dev.off()
+
+ # format it manually from the bash shell (you must run it from your workstation until ImageMagick's identity command is available on the cluster):
+ #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png
+
+
+ ### to plot the impact map of all regimes on a particular month:
+ #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100
+ year.test <- 2016
+ pos.year.test <- year.test - year.start + 1
+ imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test]
+ #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032)
+ par(fig=c(0, 1, 0.05, 1), new=TRUE)
+ PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ # vector with the frequency of the WRs in the chosen month and year:
+ wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test])
+
+ ## # or save them as individual maps:
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+ }
+
+ # Save the psl pattern of each regime as an individual map, for a selected month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 
100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 
'fre'
+
+ # save indicators for each startdate and forecast time:
+ # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector)
+ if(fields.name == forecast.name) {
+ if (composition != "impact") {
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ } else { # also save impX objects
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, cor.imp1, cor.imp2, cor.imp3, cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ }
+ }
+
+ } # close 'p' on 'period'
+
+ if((composition == 'simple' || composition == 'edpr') && as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+ } # close 'lead.month' on 'lead.months'
+
+ if(composition == "psl" || composition == "fre" || composition == "impact"){
+ # Regime names:
+ title1.xpos <- 0.01
+ title1.width <- 0.04
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+ mtext(paste0(regime1.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+ mtext(paste0(regime2.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE)
+ mtext(paste0(regime3.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE)
+ mtext(paste0(regime4.name), font=2, cex=5)
+
+ if(composition == "psl") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ mtext("hPa", cex=legend1.cex*1.3)
+ }
+
+ if(composition == "impact") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ #my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE)
+ ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(var.unit[var.num], cex=legend1.cex*1.3)
+
+ }
+
+
+ dev.off()
+
+ } # close if on composition
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered" || composition == "corr.matrix") dev.off()
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+
+
+#} # close for on forecasts.month
+
+
+
+if(composition == "taylor"){
+ library("plotrix")
+
+ fields.name="ERA-Interim"
+
+ # choose a reference season from 13 (winter) to 16 (autumn):
+ season.ref <- 13
+
+ # set the first WR classification:
+ rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+ # load data of the reference season of the first classification (this line should be modified to load the chosen season):
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ #cluster1.ref <- cluster1.name.period[13]
+ #cluster2.ref <- cluster2.name.period[13]
+ #cluster3.ref <- cluster3.name.period[13]
+ #cluster4.ref <- cluster4.name.period[13]
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num. 46 the Blocking and Atlantic regimes were saved swapped!
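+ # (A less fragile alternative to hard-coding the indices above and below would be
+ # to map the regimes by name; hypothetical sketch, assuming 'orden' stores the
+ # four regime names in the desired order:
+ # cluster.ref <- match(c("NAO+", "NAO-", "Blocking", "Atlantic Ridge"), orden) )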
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+ # set a second WR classification (i.e., with running cluster) to contrast with the new one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+ rean2.name <- "ERA-Interim"
+
+ # load data of the reference season of the second classification (this line should be modified to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num. 46 the Blocking and Atlantic regimes were saved swapped!
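+ # The assign(paste0("map.ref",...)) pattern above emulates an ordered container; a
+ # hypothetical list-based equivalent, shown only as a sketch:
+ # maps.ref <- vector("list", 4)
+ # maps.ref[[cluster1.ref]] <- pslwr1mean # then maps.ref[[1]] holds the NAO+ map, etc.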
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.maps starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map at the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.maps starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ # Plot the Taylor diagrams:
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ # load data from the second classification:
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1.old <- which(orden == cluster1.name)
+ cluster2.old <- which(orden == cluster2.name)
+ cluster3.old <- which(orden == cluster3.name)
+ cluster4.old <- which(orden == cluster4.name)
+
+ assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+ assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+ assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+ assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+ add.mod <- ifelse(p == 1, FALSE, TRUE)
+ main.mod <- ifelse(p == 1, orden[regime],"")
+
+ color.first <- "red"
+ color.second <- "blue" # "black"
+ sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work!
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+
+ # add the points from the second classification:
+ add.mod <- TRUE
+ #add.mod <- ifelse(p == 1, FALSE, TRUE)
+
+ if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+
+ if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) }
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # array storing correlations in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4))
+ array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # 
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
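+ # Hedged sketch of how a transition-probability vector like transObs1 could be
+ # estimated from a daily regime sequence (hypothetical vector 'regime.seq' with
+ # values 1-4; not necessarily how the upstream script computed it):
+ # from1 <- which(head(regime.seq, -1) == 1)                   # days in regime 1
+ # counts <- table(factor(regime.seq[from1 + 1], levels=1:4))  # next-day regimes
+ # transObs1 <- as.numeric(counts / sum(counts))               # P(1 -> k), k = 1..4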
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr. values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), 
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
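+
+ # Hedged aside, not part of the original script: the cascaded "<" assignments that build
+ # array.freq.colors above bin each value between the inner break points of my.seq; a
+ # compact equivalent, assuming the same breaks and colour vectors, would be:
+ # bin.colors <- function(x, brks, cols){
+ #   # findInterval() returns 0 below the first inner break, up to length(cols)-1 above the last
+ #   array(cols[findInterval(x, brks[-c(1, length(brks))]) + 1], dim=dim(x))
+ # }
+ # array.freq.colors <- bin.colors(array.freq, my.seq, freq.cols)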
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+
+
+
+
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequency biases:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1,
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.5) # paste0() added: passing var.name[var.num] as a second positional argument would be taken as mtext's 'side' and fail + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}} + + 
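# [editor's note] The chains of "colors[values < my.seq[k]] <- cols[k-1]" assignments used throughout this section implement a manual, left-closed binning of values into colours. A minimal sketch of the same mapping with cut() follows; bin2col() is an editor's illustration, not called anywhere in this script, and it matches the chains only for values inside the break range (out-of-range values fall to na.col):
+ bin2col <- function(values, breaks, colors, na.col="white") {
+   # interval index of each value; right=FALSE reproduces the left-closed bins above
+   idx <- cut(as.vector(values), breaks=breaks, labels=FALSE, right=FALSE, include.lowest=TRUE)
+   out <- array(colors[idx], dim=dim(values))
+   out[is.na(out)] <- na.col   # NA cells drawn in white, as for the transition arrays below
+   out
+ }
+ # e.g.: array.imp.colors <- bin2col(array.imp, my.seq, imp.cols) + 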
par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2) # paste0() added, as in the startdate figure above + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, 
cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Simulated Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.trans.sim1 but with colors instead of transition probabilities: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5) # single title: the two stacked mtext() titles here were an overprinting leftover + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
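# [editor's note] index arithmetic of the target-month panels, worked through once here since the same loop recurs in every panel below: row i is the target month and column j the reversed lead-time axis (labels=6:0), so the lead is 6-j months and the contributing startdate is i-(6-j), wrapped back into 1..12 when it falls in the previous year. A short worked example, assuming startdates on the 1st of each month as elsewhere in the script:
+ #   target month i = 2 (Feb), column j = 2  =>  lead time 6-j = 4 months
+ #   i2 = 2 - 4 = -2 <= 0  =>  i2 = -2 + 12 = 10, the October startdate
+ # i.e. the February value at 4 months lead is read from the array at [10, 1+4, regime]. + 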
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
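# [editor's note] layout idiom used by every summary figure in this section: par(fig=c(x1,x2,y1,y2)) places the next plot into that fraction of the device (normalised device coordinates, 0..1), and new=TRUE keeps the next high-level plot call from starting a fresh page, so the regime panels and the ColorBar2() legend are composed on one PNG. A standalone two-panel sketch of the idiom:
+ #   par(fig=c(0, 0.5, 0, 1));            plot(1:10)  # left half of the device
+ #   par(fig=c(0.5, 1, 0, 1), new=TRUE);  plot(10:1)  # right half, same page + 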
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
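+ # Note on the cell loops below: in these target-month panels the y axis is the
+ # target month 'i' and the x axis the lead time 'j' (labelled 6:0, i.e. reversed),
+ # so the corresponding startdate month is i-(6-j); 'i2' wraps it back into 1..12
+ # when the startdate falls in the previous calendar year, e.g. i=1 (January) at
+ # j=0 gives i-(6-j) = -5, hence i2 = 7: a July startdate verifying January at a
+ # 6-month lead.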
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ +
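+ # Each (startdate p, lead l) cell below is a unit square centred on (p, l), split
+ # into four triangles (left, bottom, right, top), each filled with the colour of
+ # the transition towards one of the four target regimes (the 4th index of the
+ # colour array; 1=NAO+, 2=Atlantic Ridge, 3=NAO-, 4=Blocking, matching the panel
+ # titles used in the startdate figures).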
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
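+ # Aside: the eight cascading '<' assignments used to build the *.colors arrays in
+ # this file could be collapsed into a single cut() lookup. A minimal, untested
+ # sketch with a hypothetical helper (bin2col is not defined elsewhere in this
+ # script), assuming finite values inside range(brks), as with these percentages:
+ # bin2col <- function(x, brks, cols) {
+ #   codes <- cut(x, brks, labels=FALSE, right=FALSE, include.lowest=TRUE)
+ #   out <- array(cols[codes], dim(x))
+ #   out[is.na(x)] <- "white"   # keep cells with no transition data white, as above
+ #   out
+ # }
+ # array.trans.obs4.colors <- bin2col(array.trans.obs4, my.seq, trans.cols)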
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
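+ # Unlike the 0-100 % scales of the transition-probability figures, these bias
+ # panels use the breaks my.seq = c(-20,-10,-5,-2,0,2,5,10,20), finer near zero;
+ # biases beyond +-20 keep the outermost colours, because the cascade above starts
+ # from trans.cols[8] and only overwrites downwards.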
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in the future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]),
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + + + + + + + + + + + + + +if(monthly_anomalies) { + # Compute the climatology and anomalies (with a LOESS filter) for the sfcWind data and draw the monthly anomalies, in case you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis: + + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[1,1,,1:365,,] # drop the singleton dataset/member dims to get (year, day, lat, lon) + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # keep the March-December period, removing the 29th of February + } + + rm(sfcWind366) + gc() + + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(sfcWindClimDaily) + gc() + +
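+ # The InsertDim() call below (from s2dverification) replicates the smoothed
+ # 365 x lat x lon climatology along a new leading 'year' dimension, so the
+ # anomaly becomes a plain elementwise subtraction. A toy illustration:
+ # clim.toy <- array(1:6, c(3, 2))       # 3 "days" x 2 "points"
+ # clim.rep <- InsertDim(clim.toy, 1, 4) # dims: 4 "years" x 3 x 2
+ # all(clim.rep[2,,] == clim.toy)        # TRUE: every year gets the same climatology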
sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), 
col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)]) + + ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("m/s", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + # you could almost use the normal PlotEquiMap function below; you only need to set the black line for anomaly=0 and decrease the size of the contour labels: + # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5) + + ##my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.37, 1, 0, 0.09), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + #mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + 
if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load regime names for the month currently drawn: + load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData")) + + ## add strip with daily sequence of WRs: + + mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name)) + mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name)) + mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name)) + mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name)) + + cluster1.name.short <- substr(cluster1.name,1,1) + cluster2.name.short <- substr(cluster2.name,1,1) + cluster3.name.short <- substr(cluster3.name,1,1) + cluster4.name.short <- substr(cluster4.name,1,1) + + ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime: + if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1) + if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2) + if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3) + if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4) + + c1 <- which(cluster.test == 1) + c2 <- which(cluster.test == 2) + c3 <- which(cluster.test == 3) + c4 <- which(cluster.test == 4) + + cluster.test.letters <- cluster.test + cluster.test.letters[c1] <- cluster1.name.short + cluster.test.letters[c2] <- cluster2.name.short + cluster.test.letters[c3] <- cluster3.name.short + cluster.test.letters[c4] <- cluster4.name.short + + cluster.col <- cluster.test.letters + cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1" + cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1" + cluster.col[which(cluster.test.letters == "B")] <- "White" + cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1" + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("fre.days",cluster1), fre1.days) + assign(paste0("fre.days",cluster2), fre2.days) + assign(paste0("fre.days",cluster3), fre3.days) + assign(paste0("fre.days",cluster4), fre4.days) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) + + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + # save strip with the daily regime series for chosen month and year: + fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png") + png(filename=fileoutput.seq,width=1500,height=1850) + + plot.new() + + sep <- 0.03 + for(day in 1: n.days.in.a.period(month.test, 2001)){ + sep.cum <- (day-1)*sep + polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day]) + text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5) + text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=cluster.test.letters[day], cex=2) + + } + + dev.off() + + + + # save average impact and sea level pressure only for chosen month and year: + fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1500,height=2000) + + plot.new() + + par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE) + PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE) + PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE) + PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE) + PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + par(fig=c(0,0.33,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, 
bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part 
of the strip + system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both)) + + + ## same image formatted for the catalogue: + fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 70 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test)) + + system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test)) + + + } # close for on month.test + } # close for on year.test + + +} # close if on monthly_anomalies + + + + + + + + + + diff --git a/old/weather_regimes_maps_v27.R b/old/weather_regimes_maps_v27.R new file mode 100644 index 0000000000000000000000000000000000000000..41698dd739a4993801448ba150b44c0368b6f993 --- /dev/null +++ b/old/weather_regimes_maps_v27.R @@ -0,0 +1,5813 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e: wind speed or temperature) +# +# I/O: input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# Note that this script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: You need to have previously created the '_psl.RData' files which are the output of 'weather_regimes'.R, for each period you want to visualize. +# If your regimes derive from a reanalysis, this script must be run twice: +# first, only one month/season at a time with: 'period=X', (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"', +# inserting the regime names 'cluster1.name=...' in the correct order; in this first run, you only save the ordered cartography. +# You already have to know which is the right regime order by taking a look at the output maps (_clusterX.png) of weather_regimes.R +# After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot) so as not to overwrite the files which store the right cluster order. +# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean" +# +# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to: +# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). 
You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, this script can use the reanalysis +# to automatically associate to each simulated cluster the observed regime +# with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +#library("corrplot") + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/57_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_ordered4variance" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/58_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days_ordered4variance" + +rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis) + +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name #forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +composition <- "edpr" # choose which kind of composition you want to plot: + # 'none' doesn't plot anything, it only associates the clusters to the regimes with the manual association in the rows below + # and saves them in the ClusterName.Rdata files for each period selected, overwriting any pre-existing files. 
+ # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # 'psl' for all the regime anomalies for a fixed forecast month + # 'fre' for all the interannual frequencies for a fixed forecast month + # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files] + # 'impact.highest' for all the impact plots of the regime with the highest impact + # 'single.impact' to save the four impact maps in a composition 2x2 + # 'single.psl' to save the individual psl map + # 'single.fre' to save the individual fre map + # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps + # 'psl.rean': plot all the regime anomalies and correlation matrix of all months of a reanalysis with DJF regime anomalies of the same reanalysis + # 'psl.rean.unordered': as before, but without ordering the regimes with the same order in vector 'orden' + # 'variance' : as 'none', but instead of associating clusters to regimes, it reorders the clusters in decreasing order of explained variance + +var.num <- 1 # if fields.name ='rean.name' and composition ='simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas) + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 1:12 + +# Manually associate the four clusters to the four regimes, one period at a time (only in case composition="none"): +cluster4.name <- "NAO+" +cluster1.name <- "NAO-" +cluster2.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4" + # (when composition='edpr' or monthly_anomalies = TRUE) + +####### if monthly_anomalies <- TRUE, you have to specify these additional parameters: +monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below +year.chosen <- 2016 +month.chosen <- 1:12 +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +rean.data <- JRA55 # choose one of the above reanalyses + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the 
end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ + +####### Derived variables ############################################################################################################################################### + +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- FALSE # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months +mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden} + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 + +############################################################################################ +## Start analysis ## +############################################################################################ + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... 
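+# All compositions in this script are laid out by overlaying panels on one open device with
+# par(fig=c(x1,x2,y1,y2), new=TRUE), where the four values are the panel corners in normalized
+# device coordinates, as in the sheet title above. A minimal standalone sketch of the technique
+# (illustration only; 'layout_demo.png' is a hypothetical file name, not one of this script's outputs):
+# png("layout_demo.png", width=600, height=600)
+# plot.new()
+# par(fig=c(0, 0.5, 0.5, 1), new=TRUE); plot(1:10, main="top-left panel")
+# par(fig=c(0.5, 1, 0.5, 1), new=TRUE); plot(10:1, main="top-right panel")
+# par(fig=c(0.1, 0.9, 0, 0.45), new=TRUE); plot(rnorm(50), type="l", main="bottom strip")
+# dev.off()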
+ + if(composition == "none") { + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + } + + if(composition == "variance"){ + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + period <- 1:12 + } + + if(composition == "psl.rean") { + ordering <- TRUE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering <- FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + ## if(composition == "corr.matrix") { + ## ordering <- FALSE # set it to TRUE if you want to see the correlation matrix of the ordered clusters instead!!! + ## period <- c(9:12, 1:8) # to start from September instead of January + ## png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + ## plot.new() + ## } + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple" || composition == 'edpr'){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month') + impact.data <- FALSE + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + # load impact data only if it is available, if not it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print(paste0("Impact data for variable ",var.name[var.num] ," available for reanalysis ", rean.name)) + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print(paste0("Impact data for variable ",var.name[var.num] ," not available for reanalysis ", rean.name)) + } + + if(composition == "variance"){ + my.cluster2 <- my.cluster # create a copy of my.cluster + + ss1 <- which(my.cluster$cluster == 1) + ss2 <- which(my.cluster$cluster == 2) + ss3 <- which(my.cluster$cluster == 3) + ss4 <- which(my.cluster$cluster == 4) + + 
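+    # The masking steps below rank the four clusters in decreasing order of within-cluster
+    # variance (withinss) by repeatedly setting the current maximum to NA. A compact
+    # equivalent, shown only as a hypothetical alternative (assuming no ties), would be:
+    # max.seq.alt <- order(my.cluster$withinss, decreasing=TRUE) # e.g. c(3,1,4,2)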
withinss <- my.cluster$withinss + max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # first cluster with maximum variance + withinss[max1] <- NA + + max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # second cluster with maximum variance + withinss[max2] <- NA + + max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # third cluster with maximum variance + withinss[max3] <- NA + + max4 <- which(!is.na(withinss)) + rm(withinss) + + # vector whose first element tells you which cluster has the maximum variance, whose second element tells you which cluster has the + # second-highest variance, and so on: + max.seq <- c(max1, max2, max3, max4) + + assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+) + assign(paste0("cluster",max2,".name"), orden[2]) + assign(paste0("cluster",max3,".name"), orden[3]) + assign(paste0("cluster",max4,".name"), orden[4]) + + } + } + + if(fields.name == forecast.name && n.map < 7){ + load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + + if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print("Impact data available for forecasts") + if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps + } else { + impact.data <- FALSE + print("Impact data for forecasts not available") + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name && n.map < 7){ + + # compute the simulated interannual monthly variability: + n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month + + freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum) + freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum) + freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum) + freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum) + + sd.freq.sim1 <- sd(freq.sim1) / n.days.month + sd.freq.sim2 <- sd(freq.sim2) / n.days.month + sd.freq.sim3 <- sd(freq.sim3) / n.days.month + sd.freq.sim4 <- sd(freq.sim4) / n.days.month + + # compute the transition probabilities: + j1 <- j2 <- j3 <- j4 <- 1 + transition1 <- transition2 <- transition3 <- transition4 <- c() + + n.members <- dim(my.cluster.array[[p]])[3] + + for(m in 1:n.members){ + for(y in 1:n.years){ + for (d in 1:(n.days.month-1)){ + + if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){ + transition1[j1] <- my.cluster.array[[p]][d + 1,y,m] + j1 <- j1 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){ + transition2[j2] <- 
my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1year1member1, day2year1member1, ..., day1year2member1, day2year2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast 
<- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # also load the reanalysis regime names + + # load reanalysis varwrXmean impact data: + if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + # compute the observed transition probabilities: + n.days.month <- n.days.in.a.period(load.month,2001) + j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c() + + for (d in 1:(length(my.cluster$cluster)-1)){ + if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs1[j1o] <- my.cluster$cluster[d + 1] + j1o <- j1o + 1 + } + if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs2[j2o] <- my.cluster$cluster[d + 1] + j2o <- j2o + 1 + } + if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs3[j3o] <- my.cluster$cluster[d + 1] + j3o <- j3o + 1 + } + if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){ + transition.obs4[j4o] <- my.cluster$cluster[d + 1] + j4o <- j4o +1 + } + + } + + trans.obs <- matrix(NA,4,4 , dimnames= list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"))) + trans.obs[1,2] <- length(which(transition.obs1 == 2)) + trans.obs[1,3] <- length(which(transition.obs1 == 3)) + trans.obs[1,4] <- length(which(transition.obs1 == 4)) + + trans.obs[2,1] <- length(which(transition.obs2 == 1)) + trans.obs[2,3] <- length(which(transition.obs2 == 3)) + trans.obs[2,4] <- length(which(transition.obs2 == 4)) + + trans.obs[3,1] <- length(which(transition.obs3 == 1)) + trans.obs[3,2] <- length(which(transition.obs3 == 2)) + trans.obs[3,4] <- length(which(transition.obs3 == 4)) + + trans.obs[4,1] <- length(which(transition.obs4 == 1)) + trans.obs[4,2] <- length(which(transition.obs4 == 2)) + trans.obs[4,3] <- length(which(transition.obs4 == 3)) + + trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T) + for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i] + + # compute the observed interannual monthly variability: + freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c() + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1)) + freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2)) + freq.obs3[y-year.start+1] <- 
length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3)) + freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4)) + } + + sd.freq.obs1 <- sd(freq.obs1) / n.days.month + sd.freq.obs2 <- sd(freq.obs2) / n.days.month + sd.freq.obs3 <- sd(freq.obs3) / n.days.month + sd.freq.obs4 <- sd(freq.obs4) / n.days.month + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order of the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] 
+ cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + persObs1 <- mean(sequ1[which(sequ1 != 0)]) + persObs2 <- mean(sequ2[which(sequ2 != 0)]) + persObs3 <- mean(sequ3[which(sequ3 != 0)]) + persObs4 <- mean(sequ4[which(sequ4 != 0)]) + + ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection: + ### forecast month: September + #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"} + #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + + if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" } + if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" } + if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" } + if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" } + if(p == 
4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, 
cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData") + if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check if file exists or not + load(ClusterName.file) # load cluster names + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc. 
+ } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + 
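+ # The rows/columns of trans.obs follow the original cluster numbering, while 'ordered.sequence.obs' maps each regime position back to its cluster index, so indexing a row with it returns that row in regime order. A minimal commented sketch with hypothetical values:
+ # clusters.all.obs <- c(2, 4, 1, 3) # cluster i occupies regime position clusters.all.obs[i]
+ # ordered.sequence.obs <- sapply(1:4, function(r) which(clusters.all.obs == r)) # -> c(3, 1, 4, 2)
+ # trans.obs[1, ordered.sequence.obs] # transitions from cluster 1, re-expressed in regime order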
+ assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # positions of the longitude values over Europe only (excluding the Atlantic Ocean and the Americas): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" # add white in the middle + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max <- 100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k <- k + 1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start +
1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all 4 regimes: their pressure fields, their impact on the chosen variable and their frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for debugging: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ##
#lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(orden[1],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(orden[2],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(orden[3],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(orden[4],": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, 
filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(orden[4], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name 
== forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(orden[4], " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 
'simple' + + + if(composition == "edpr"){ + + ## adjust the color legends to include triangles at the extremities, by increasing the number of intervals by two: + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--red colors + + ## same but for SLP: + my.brks <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" ## add white in the middle + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa",
cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(orden[1]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(orden[2]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(orden[3]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(orden[4]," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(orden[4], " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, 
legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
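+ # (this also explains the bquote(bar(nu) == .(...)) construction used for the mean-frequency labels below: the .( ) operator evaluates paste0() first and splices the resulting string into the plotmath expression)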
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(orden[4], " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, 
intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean" || composition == "psl.rean.unordered"){ + ## matrix correlation with DJF regimes: + #cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean + #cluster1.name.monthly <- cluster1.name; cluster2.name.monthly <- cluster2.name; cluster3.name.monthly <- cluster3.name; cluster4.name.monthly <- cluster4.name + assign(paste0("clusterMax", which(orden == cluster1.name), ".monthly"), pslwr1mean) + assign(paste0("clusterMax", which(orden == cluster2.name), ".monthly"), pslwr2mean) + assign(paste0("clusterMax", which(orden == cluster3.name), ".monthly"), pslwr3mean) + assign(paste0("clusterMax", which(orden == cluster4.name), ".monthly"), pslwr4mean) + + ## cluster.name.monthly <- c(cluster1.name.monthly, cluster2.name.monthly, cluster3.name.monthly, cluster4.name.monthly) + ## max1 <- which(cluster.name.monthly == orden[1]) # get the monthly cluster with the highest explained variance (by default it is associated with NAO+) + ## max2 <- which(cluster.name.monthly == orden[2]) # get the monthly cluster with the second highest variance + ## max3 <- which(cluster.name.monthly == orden[3]) # ... + ## max4 <- which(cluster.name.monthly == orden[4]) # ... + + ## max.seq <- c(max1, max2, max3, max4) + + ## Load DJF data: + rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" + + load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # Load mean slp DJF data from the same reanalysis + load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also reanalysis DJF regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + cluster1 <- which(orden == cluster1.name) # clusters for DJF!!!
+ cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("psl.ordered",cluster1), pslwr1mean) # psl for DJF + assign(paste0("psl.ordered",cluster2), pslwr2mean) + assign(paste0("psl.ordered",cluster3), pslwr3mean) + assign(paste0("psl.ordered",cluster4), pslwr4mean) + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden)) + # correlate each monthly cluster pattern with each ordered DJF regime pattern: + for(i in 1:4) for(j in 1:4) cluster.corr[i,j] <- cor(as.vector(get(paste0("clusterMax",i,".monthly"))), as.vector(get(paste0("psl.ordered",j)))) + + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.08 + map.xpos <- map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + print(paste0("map.xpos= ", map.xpos)) + + par(fig=c(0, 1, 0, 1), new=TRUE) + # reset par to its default values, because drawing with PlotEquiMap() alters some par values: + if(n.map == 1) { op <- par(no.readonly = TRUE) } else { par(op) } + + text.cex <- 2 + text.ypos <- 1.03 + text.xmod <- 0.007 * (n.map - 1) + text.xpos <- map.xpos + text.xmod - 0.02 + text.width <- 0.015 + text(x=text.xpos - text.width, y=text.ypos - 0.02, labels="cl1", cex=text.cex) + text(x=text.xpos - text.width, y=text.ypos - 0.04, labels="cl2", cex=text.cex) + text(x=text.xpos - text.width, y=text.ypos - 0.06, labels="cl3", cex=text.cex) + text(x=text.xpos - text.width, y=text.ypos - 0.08, labels="cl4", cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos + 0.00, labels="NAO+", cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos + 0.00, labels="NAO-", cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos + 0.00, labels="BLO", cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos + 0.00, labels="ATL", cex=text.cex) + + text(x=text.xpos + 0*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,1], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,2], cex=text.cex) +
text(x=text.xpos + 1*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,2], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,3], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,4], cex=text.cex) + + ## Centroid maps: + ## (note that mapX == clusterMaxX.monthly, X = 1, ..., 4 by default) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + } # close if on psl.rean or on psl.rean.unordered + + + # Visualize the composition of the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Impact maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10,
intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = 
FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3) + par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE) + mtext("m/s",cex=3) + + dev.off() + + # format it manually from the bash shell (you must run it from your workstation until the 'identify' command of ImageMagick is available on the cluster): + #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png + + + ### to plot the impact map of all regimes on a particular month: + #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100 + year.test <- 2016 + pos.year.test <- year.test - year.start + 1 + imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test] + #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032) + par(fig=c(0, 1, 0.05, 1), new=TRUE) + PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + # vector with the frequency of the WRs in the chosen month and year: + wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test]) + + ## # or save them as individual maps: + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + } + + # Save the regime anomaly (psl) pattern of each cluster as an individual map for a selected month: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) +
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Frequency barplots: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA,
100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 
+ # save indicators for each startdate and forecast time:
+ # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector)
+ if(fields.name == forecast.name) {
+ if (composition != "impact") {
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ } else { # also save impX objects
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, cor.imp1, cor.imp2, cor.imp3, cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ }
+ }
+
+ } # close 'p' on 'period'
+
+ if((composition == 'simple' || composition == 'edpr') && as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+ } # close 'lead.month' on 'lead.months'
+
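+ # The "summary" composition further below reads these files back with load(); a
+ # minimal standalone check of one of them (hypothetical startdate/lead values,
+ # assuming the same work.dir used for saving):
+ #   load(paste0(work.dir, "/", fields.name, "_", my.period[1], "_leadmonth_0_ClusterNames.RData"))
+ #   ls(pattern = "^trans")   # -> the transSim1..4 / transObs1..4 vectors just restored
+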
+ if(composition == "psl" || composition == "fre" || composition == "impact"){
+ # Regime names:
+ title1.xpos <- 0.01
+ title1.width <- 0.04
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+ mtext(paste0(orden[1]), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+ mtext(paste0(orden[2]), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE)
+ mtext(paste0(orden[3]), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE)
+ mtext(paste0(orden[4]), font=2, cex=5)
+
+ if(composition == "psl") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ mtext("hPa", cex=legend1.cex*1.3)
+ }
+
+ if(composition == "impact") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ #my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE)
+ ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(var.unit[var.num], cex=legend1.cex*1.3)
+
+ }
+
+ dev.off()
+
+ } # close if on composition
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered") dev.off()
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+
+
+#} # close for on forecasts.month
+
+
+
+if(composition == "taylor"){
+ library("plotrix")
+
+ fields.name <- "ERA-Interim"
+
+ # choose a reference season from 13 (Winter) to 16 (Autumn):
+ season.ref <- 13
+
+ # set the first WR classification:
+ rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+ # load data of the reference season of the first classification (this line should be modified to load the chosen season):
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ #cluster1.ref <- cluster1.name.period[13]
+ #cluster2.ref <- cluster2.name.period[13]
+ #cluster3.ref <- cluster3.name.period[13]
+ #cluster4.ref <- cluster4.name.period[13]
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
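+
+ # Sketch of the name -> position indirection used above (toy names only, not the
+ # real data):
+ #   orden <- c("NAO+","NAO-","Blocking","Atl.ridge")   # display order of the four regimes
+ #   cluster1.name <- "Blocking"                        # name attached to cluster 1 in the RData file
+ #   which(orden == cluster1.name)                      # -> 3, so pslwr1mean is stored as map.ref3
+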
+ # set a second WR classification (i.e., with running cluster) to contrast with the new one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+ rean2.name <- "ERA-Interim"
+
+ # load data of the reference season of the second classification (this line should be modified to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
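+
+ # Design note: a named list would avoid scattering map.old.refX variables through
+ # the workspace with assign(); a minimal equivalent, shown only as an alternative:
+ #   map.old.ref <- vector("list", 4)
+ #   map.old.ref[[cluster1.ref]] <- pslwr1mean   # and likewise for clusters 2 to 4
+ #   map.old.ref[[3]]                            # replaces get("map.old.ref3")
+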
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
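+
+ # Standalone check of the palette recipe above (needs only RColorBrewer):
+ #   library(RColorBrewer)
+ #   brks <- c(seq(-19,-1,2),0,seq(1,19,2))                        # 21 breaks -> 20 bins
+ #   cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(brks)-1)
+ #   length(cols)                                                  # -> 20, one colour per bin
+ #   cols[10] <- cols[11] <- "white"                               # blank the two bins around 0
+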
+ # plot the composition with the regime anomalies of the monthly regimes:
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map to the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
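+ # The Taylor diagrams below compare each monthly regime pattern against the seasonal
+ # reference. With normalize=TRUE the radial coordinate is s = sd(map)/sd(map.ref) and
+ # the angular coordinate is the pattern correlation r, so the centred RMS difference
+ # follows from the law of cosines (Taylor, 2001):
+ #   E2 <- function(s, r) sqrt(1 + s^2 - 2*s*r)
+ #   E2(1.1, 0.9)   # -> ~0.48, in units of sd(map.ref)
+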
+ # Plot the Taylor diagrams:
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ # load data from the second classification:
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1.old <- which(orden == cluster1.name)
+ cluster2.old <- which(orden == cluster2.name)
+ cluster3.old <- which(orden == cluster3.name)
+ cluster4.old <- which(orden == cluster4.name)
+
+ assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+ assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+ assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+ assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+ add.mod <- ifelse(p == 1, FALSE, TRUE)
+ main.mod <- ifelse(p == 1, orden[regime],"")
+
+ color.first <- "red"
+ color.second <- "blue" # "black"
+ sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work!
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+
+ # add the points from the second classification:
+ add.mod <- TRUE
+ #add.mod <- ifelse(p == 1, FALSE, TRUE)
+
+ if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+
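+ # The bookkeeping below stores one correlation and one RMSE per regime and month for
+ # the after-loop summaries; a minimal sketch of the helper it relies on (assuming
+ # RMSE() is the usual root-mean-square error):
+ #   RMSE <- function(x, y) sqrt(mean((x - y)^2))
+ #   RMSE(c(1,2,3), c(1,2,5))   # -> 1.154701
+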
+ if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) }
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # array storing correlations in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4))
+ array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # Blocking
+ array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge
+
+ array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+
+ array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO-
+ array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking
+ array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge
+
+ array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+ array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+ array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+ array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+ array.pers[p,1+l, 1] <- persist1 # NAO+
+ array.pers[p,1+l, 3] <- persist2 # NAO-
+ array.pers[p,1+l, 4] <- persist3 # Blocking
+ array.pers[p,1+l, 2] <- persist4 # Atl.Ridge
+
+ array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+
+ array.pers.obs[p,1+l, 3] <- persistObs2 # NAO-
+ array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking
+ array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge
+
+ array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+
+ array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO-
+ array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking
+ array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge
+
+ array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+
+ array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO-
+ array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking
+ array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge
+
+ array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+
+ array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO-
+ array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking
+ array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge
+
+ array.imp[p,1+l, 1] <- cor.imp1 # NAO+
+ array.imp[p,1+l, 3] <- cor.imp2 # NAO-
+ array.imp[p,1+l, 4] <- cor.imp3 # Blocking
+ array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge
+
+ array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO-
+ array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking
+ array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+
+ array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking
+ array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+
+ array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO-
+ array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+
+ array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO-
+ array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking
+ array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO-
+ array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking
+ array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge
+
+ array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
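+ # Indexing sketch for the summary arrays filled above: dimension 3 is the drawing
+ # position inside each startdate/lead cell, not the regime number itself:
+ #   k = 1 -> left triangle (NAO+),  k = 2 -> bottom triangle (Atl.Ridge),
+ #   k = 3 -> right triangle (NAO-), k = 4 -> top triangle (Blocking)
+ # so, e.g., array.trans.sim1[5, 1+2, 4] holds the % of NAO+ -> blocking transitions
+ # for the May startdate at lead month 2, drawn in the top triangle of that cell.
+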
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
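+
+ # The cascade of "<" assignments above is a hand-rolled binning; a broadly equivalent
+ # and more compact form with base R (edge and NA handling aside, shown only as a sketch):
+ #   bin <- cut(array.spat.cor, breaks = my.seq, labels = FALSE, right = FALSE, include.lowest = TRUE)
+ #   array.spat.cor.colors[] <- sp.corr.cols[bin]
+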
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
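+ # Geometry note: each startdate/lead cell above is the square [p-0.5, p+0.5] x
+ # [l-0.5, l+0.5], split into four triangles that meet at the centre (p, l); a
+ # standalone toy cell for p = 1, l = 0 (same coordinates as the polygon() calls):
+ #   plot(NA, xlim=c(0.5,1.5), ylim=c(-0.5,0.5))
+ #   polygon(c(0.5,0.5,1.0), c(-0.5,0.5,0.0), col="red")      # left (NAO+)
+ #   polygon(c(0.5,1.0,1.5), c(-0.5,0.0,-0.5), col="blue")    # bottom (Atl.Ridge)
+ #   polygon(c(1.0,1.5,1.5), c(0.0,0.5,-0.5), col="green")    # right (NAO-)
+ #   polygon(c(0.5,1.0,1.5), c(0.5,0.0,0.5), col="orange")    # top (Blocking)
+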
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+ # Temporal Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1)
+
+ array.cor.colors <- array(corr.cols[8],c(12,7,4))
+ array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+ array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+ array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+ array.cor.colors[array.cor < 0] <- corr.cols[4]
+ array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+ array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+ array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Correlation between S4 and ERA-Interim frequencies", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+ # Frequency summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(NA,20,22,24,26,28,30,32,NA)
+
+ array.freq.colors <- array(freq.cols[8],c(12,7,4))
+ array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7]
+ array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6]
+ array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5]
+ array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4]
+ array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3]
+ array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2]
+ array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Mean S4 frequency (%)", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean S4 frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean S4 frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
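+ # Design note: the summary figures repeat the same startdate-panel boilerplate; a
+ # small helper like the hypothetical sketch below could draw one panel and remove
+ # most of the duplication (col.array and k as in the blocks above):
+ #   draw.panel <- function(col.array, k, fig.head, fig.body, lab) {
+ #     par(mar=c(0,0,4,0), fig=fig.head, new=TRUE); mtext(lab, cex=1.5)
+ #     par(mar=c(0,4,1,0), fig=fig.body, new=TRUE)
+ #     plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ #     axis(1, at=1:12, las=2, labels=my.month.short, cex.axis=1.5)
+ #     axis(2, at=0:6, las=2, labels=0:6, cex.axis=1.5)
+ #     for(p in 1:12) for(l in 0:6) polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=col.array[p, 1+l, k])
+ #   }
+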
+ # Obs. frequency summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(NA,20,22,24,26,28,30,32,NA)
+
+ array.freq.colors <- array(freq.cols[8],c(12,7,4))
+ array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7]
+ array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6]
+ array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5]
+ array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4]
+ array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3]
+ array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2]
+ array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Mean observed frequency (%)", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Mean observed frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim regime frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
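# ColorBar2() is presumably a local variant of s2dverification's ColorBar(): it draws
# the vertical legend strip for the my.seq breaks, with my.ticks = -0.5 + 1:length(my.seq)
# and my.labels placing one label at each box edge instead of drawing default ticks.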
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, 
cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+
+ # Simulated Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("ECMWF-S4 / NAO+ Transition Probability \nJanuary to December / 1994-2013", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
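# note the reversed x axis (labels=6:0): column j of the filling loops below holds +
# lead time 6-j, so lead time runs from 6 months (left) down to 0 months (right) +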
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
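# white cells mark NA transition probabilities (set via is.na(array.trans.obs1) above), +
# presumably startdate/lead time pairs with no NAO+ spell to transition from +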
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
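# --- Editor's sketch (not part of the original script): every colour array in these summaries (array.trans.sim*.colors, array.trans.obs*.colors, array.trans.diff*.colors) is built with the same cascade of "< my.seq[k]" assignments. Assuming all values lie within the range of the breaks, the cascade collapses to one vectorised lookup:
map.to.cols <- function(x, brks, cols){
  out <- array(cols[findInterval(x, brks, rightmost.closed=TRUE)], dim=dim(x))
  out[is.na(x)] <- "white"  # NA transition probabilities are drawn in white
  out
}
# e.g.: array.trans.obs2.colors <- map.to.cols(array.trans.obs2, my.seq, trans.cols) ---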
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
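# --- Editor's sketch (not part of the original script): the four polygon() calls in the single-panel figures above split each (startdate p, lead time l) cell into left/bottom/right/top triangles, one per transition target (the 4th dimension of the colour arrays). A small helper makes the geometry explicit:
draw.cell <- function(p, l, cols4){
  polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1])      # left
  polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2])      # bottom
  polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3])      # right
  polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4])      # top
}
# e.g.: for(p in 1:12) for(l in 0:6) draw.cell(p, l, array.trans.obs3.colors[p, 1+l, ]) ---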
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ +
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
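# --- Editor's sketch (not part of the original script): each startdate panel in these multi-panel figures repeats the same frame/axes/grid recipe, so a helper taking one 12 x 7 colour slice would shorten the script considerably (my.month.short is assumed to be the month-label vector used throughout):
startdate.panel <- function(cols.slice){
  plot(1, 1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=FALSE)
  mtext(side=1, text="Startdate", line=4, cex=1.5)
  mtext(side=2, text="Lead time", line=2.5, cex=1.5)
  axis(1, at=1:12, las=2, labels=my.month.short, cex.axis=1.5)
  axis(2, at=0:6, las=2, labels=0:6, cex.axis=1.5)
  for(p in 1:12) for(l in 0:6)
    polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=cols.slice[p, 1+l])
}
# e.g.: startdate.panel(array.trans.obs4.colors[,,1])  # NAO+ panel of the figure above ---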
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WR (largest absolute impact at each grid point): + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WR with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]),
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WR with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + + + + + + + + + + + + + +if(monthly_anomalies) { + # Compute climatology and anomalies (with LOESS filter) for sfcWind data, and draw monthly anomalies, if you want to compare the mean daily wind speed anomalies reconstructed by the weather regimes classification with those observed by the reanalysis: + + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period, removing the 29th of February + } + + rm(sfcWind366) + gc() + + # LOESS anomalies: + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + +
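+  # Illustrative sketch (not part of the original workflow): the LOESS filtering above fits a local
+  # regression of each grid point's 365-day climatology on the day of the year and keeps the smoothed
+  # prediction; with span=0.35 each local fit uses about 35% of the 365 days (~128 days). A minimal
+  # standalone example on synthetic data, assuming only base R (loess/predict from 'stats'):
+  if(FALSE){
+    day <- 1:365
+    clim.raw <- 10 + 3*sin(2*pi*day/365) + rnorm(365, sd=0.5)   # noisy daily climatology of one grid point
+    clim.smooth <- predict(loess(clim.raw ~ day, span=0.35))    # smoothed annual cycle, as in the loop above
+    plot(day, clim.raw, type="l", col="grey"); lines(day, clim.smooth, lwd=2)
+  }
+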
rm(sfcWindClimDaily) + gc() + + sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # color scale for impact maps: + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], 
vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)]) + + ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("m/s", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + # you could almost use the normal PlotEquiMap function below; it only lacks the black contour line at anomaly=0 and smaller contour labels: + # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5) + + ##my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.37, 1, 0, 0.09), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + #mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test ==
4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load regime names (indexed with month.test; the original used the stale loop index 'p' here): + load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData")) + + ## add strip with daily sequence of WRs: + + mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name)) + mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name)) + mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name)) + mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name)) + + cluster1.name.short <- substr(cluster1.name,1,1) + cluster2.name.short <- substr(cluster2.name,1,1) + cluster3.name.short <- substr(cluster3.name,1,1) + cluster4.name.short <- substr(cluster4.name,1,1) + + ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime: + if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1) + if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2) + if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3) + if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4) + + c1 <- which(cluster.test == 1) + c2 <- which(cluster.test == 2) + c3 <- which(cluster.test == 3) + c4 <- which(cluster.test == 4) + + cluster.test.letters <- cluster.test + cluster.test.letters[c1] <- cluster1.name.short + cluster.test.letters[c2] <- cluster2.name.short + cluster.test.letters[c3] <- cluster3.name.short + cluster.test.letters[c4] <- cluster4.name.short + + my.strip <- cluster.test.letters + + if(no.regimes) { + cluster.test.letters2 <- cluster.test.letters + cluster.test.letters2[which(cluster.test.letters == "N+")] <- "C1" + cluster.test.letters2[which(cluster.test.letters == "N-")] <- "C2" + cluster.test.letters2[which(cluster.test.letters == "A")] <- "C3" + cluster.test.letters2[which(cluster.test.letters == "B")] <- "C4" + my.strip <- cluster.test.letters2 + } + + cluster.col <- cluster.test.letters + cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1" + cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1" + cluster.col[which(cluster.test.letters == "B")] <- "White" + cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1" + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("fre.days",cluster1), fre1.days) + assign(paste0("fre.days",cluster2), fre2.days) + assign(paste0("fre.days",cluster3), fre3.days) + assign(paste0("fre.days",cluster4), fre4.days) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) +
assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) + + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + # save strip with the daily regime series for chosen month and year: + fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png") + png(filename=fileoutput.seq,width=1500,height=1850) + + plot.new() + + sep <- 0.03 + for(day in 1: n.days.in.a.period(p, 2001)){ + sep.cum <- (day-1)*sep + polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day]) + text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5) + text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=my.strip[day], cex=2) + + } + + dev.off() + + + + # save average impact and sea level pressure only for chosen month and year: + fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1500,height=2000) + + plot.new() + + par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE) + PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE) + PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE) + PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE) + PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden} + + par(fig=c(0,0.33,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2) + 
par(fig=c(0,0.33,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " SLP anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 
0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part of the strip + system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both)) + + + ## same image formatted for the catalogue: + fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test)) + + system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput)) + + + } # close for on month.test + } # close for on year.test + + +} # close if on monthly_anomalies + + + + + + + + + + diff --git a/old/weather_regimes_maps_v27.R~ b/old/weather_regimes_maps_v27.R~ new file mode 100644 index 0000000000000000000000000000000000000000..e99e973793a0bdbb54b93ce0224a497e6e29459b --- /dev/null +++ b/old/weather_regimes_maps_v27.R~ @@ -0,0 +1,5812 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the input files needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, which the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# Note that this script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: You need to have previously created the '_psl.RData' files, which are the output of 'weather_regimes.R', for each period you want to visualize. +# If your regimes derive from a reanalysis, this script must be run twice: +# first, only one month/season at a time with: 'period=X', (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"', +# and inserting the regime names 'clusterX.name=...' in the correct order; in this first run, you only save the ordered cartography. +# You already have to know which is the right regime order by taking a look at the output maps (_clusterX.png) of weather_regimes.R +# After that, any time you run this script, remember to set: +# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
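+# For example (only an illustration of the two runs described above, not additional options): a first pass
+# for January could set period <- 1; ordering <- TRUE; save.names <- TRUE; composition <- "none" together with
+# the four 'clusterX.name' variables, while every later run keeps 'ordering = TRUE' but sets 'save.names = FALSE'.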
+# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean" +# +# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to: +# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis +# can be used to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +#library("corrplot") + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/57_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_ordered4variance" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/58_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days_ordered4variance" + +rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis) + +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name #forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +composition <- "psl.rean"
# choose which kind of composition you want to plot: + # 'none' doesn't plot anything, it only associates the clusters to the regimes with the manual association in the rows below + # and saves them in the ClusterName.Rdata files for each period selected, overwriting any pre-existing files. + # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # 'psl' for all the regime anomalies for a fixed forecast month + # 'fre' for all the interannual frequencies for a fixed forecast month + # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files] + # 'impact.highest' for the impact plots of the regime with the highest impact + # 'single.impact' to save the four impact maps in a composition 2x2 + # 'single.psl' to save the individual psl map + # 'single.fre' to save the individual fre map + # 'taylor' to plot the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps + # 'psl.rean': plot all the regime anomalies and correlation matrix of all months of a reanalysis with DJF regime anomalies of the same reanalysis + # 'psl.rean.unordered': as before, but without ordering the regimes with the same order in vector 'orden' + # 'variance' : as 'none', but instead of associating clusters to regimes, it reorders the clusters in decreasing order of explained variance + +var.num <- 1 # if fields.name ='rean.name' and composition ='simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12 + +# Manually associate the four clusters to the four regimes, one period at a time (only in case composition="none"): +cluster4.name <- "NAO+" +cluster1.name <- "NAO-" +cluster2.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +####### if monthly_anomalies <- TRUE, you have to specify these additional parameters: +monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below +year.chosen <- 2016 +month.chosen <- 1:12 +no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4" +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +rean.data <- JRA55 # choose one of the above reanalyses + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a '}' at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ + +####### Derived variables ############################################################################################################################################### + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 + +############################################################################################ +## Start analysis ## +############################################################################################ + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... + + if(composition == "none") { + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + } + + if(composition == "variance"){ + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + period <- 1:12 + } + + if(composition == "psl.rean") { + ordering <- TRUE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering <- FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + ## if(composition == "corr.matrix") { + ## ordering <- FALSE # set it to TRUE if you want to see the correlation matrix of the ordered clusters instead!!! + ## period <- c(9:12, 1:8) # to start from September instead of January + ## png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + ## plot.new() + ## } + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! 
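+
+# Note: inside the loop below, the start month for a given target month and lead time is
+# computed with a subtraction followed by a +12 wrap-around; the same wrap can be written
+# as modular arithmetic (an equivalent one-liner, shown only as an illustration):
+#
+# period <- ((forecast.month - lead.month - 1) %% 12) + 1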
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1]  # for the debug
+
+    if(composition == "simple" || composition == 'edpr'){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+
+    for(p in period){
+      #p <- period[1]  # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load the regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      impact.data <- FALSE
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig  # restore the original values of this script (necessary after loading the regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig  # restore the original values of this script
+
+        # load the impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print(paste0("Impact data for variable ",var.name[var.num]," available for reanalysis ", rean.name))
+          load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+        } else {
+          impact.data <- FALSE
+          print(paste0("Impact data for variable ",var.name[var.num]," not available for reanalysis ", rean.name))
+        }
+
+        if(composition == "variance"){
+          my.cluster2 <- my.cluster  # create a copy of my.cluster
+
+          ss1 <- which(my.cluster$cluster == 1)
+          ss2 <- which(my.cluster$cluster == 2)
+          ss3 <- which(my.cluster$cluster == 3)
+          ss4 <- which(my.cluster$cluster == 4)
+
+          withinss <- my.cluster$withinss
+          max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE))  # first cluster with maximum variance
+          withinss[max1] <- NA
+
+          max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE))  # second cluster with maximum variance
+          withinss[max2] <- NA
+
+          max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE))  # third cluster with maximum variance
+          withinss[max3] <- NA
+
+          max4 <- which(!is.na(withinss))
+          rm(withinss)
+
+          # vector whose first element is the cluster with the maximum variance, whose second element is the cluster with the
+          # second maximum variance, and so on:
+          max.seq <- c(max1, max2, max3, max4)
+
+          assign(paste0("cluster",max1,".name"), orden[1])  # associate the cluster with the highest explained variance with the first regime to plot (usually NAO+)
+          assign(paste0("cluster",max2,".name"), orden[2])
+          assign(paste0("cluster",max3,".name"), orden[3])
+          assign(paste0("cluster",max4,".name"), orden[4])
+
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData"))  # load the cluster time series and mean psl data
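+
+        # Aside: the recurring "restore the original values" blocks are needed because load()
+        # dumps every object stored in an .RData file straight into the current workspace.
+        # A hypothetical helper (an illustrative sketch only) that loads into a private
+        # environment would avoid the clobbering altogether:
+        #
+        # load.env <- function(f) { e <- new.env(); load(f, envir = e); as.list(e) }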
+
+        if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for forecasts")
+          if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))  # load the mean var data for the impact maps
+        } else {
+          impact.data <- FALSE
+          print("Impact data for forecasts not available")
+        }
+
+        if(var.num != var.num.orig) var.num <- var.num.orig  # restore the original values of this script (necessary after loading the regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig  # restore the original values of this script (necessary after loading the regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig  # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+        # compute the simulated interannual monthly variability:
+        n.days.month <- dim(my.cluster.array[[p]])[1]  # number of days in the forecasted month
+
+        freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+        freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+        freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+        freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+        sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+        sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+        sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+        sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
+        # compute the transition probabilities:
+        j1 <- j2 <- j3 <- j4 <- 1
+        transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+        n.members <- dim(my.cluster.array[[p]])[3]
+
+        for(m in 1:n.members){
+          for(y in 1:n.years){
+            for (d in 1:(n.days.month-1)){
+
+              if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){
+                transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+                j1 <- j1 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){
+                transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+                j2 <- j2 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){
+                transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+                j3 <- j3 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){
+                transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+                j4 <- j4 + 1
+              }
+
+            }
+          }
+        }
+
+        trans.sim <- matrix(NA,4,4, dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+        trans.sim[1,2] <- length(which(transition1 == 2))
+        trans.sim[1,3] <- length(which(transition1 == 3))
+        trans.sim[1,4] <- length(which(transition1 == 4))
+
+        trans.sim[2,1] <- length(which(transition2 == 1))
+        trans.sim[2,3] <- length(which(transition2 == 3))
+        trans.sim[2,4] <- length(which(transition2 == 4))
+
+        trans.sim[3,1] <- length(which(transition3 == 1))
+        trans.sim[3,2] <- length(which(transition3 == 2))
+        trans.sim[3,4] <- length(which(transition3 == 4))
+
+        trans.sim[4,1] <- length(which(transition4 == 1))
+        trans.sim[4,2] <- length(which(transition4 == 2))
+        trans.sim[4,3] <- length(which(transition4 == 3))
+
+        trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+        for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
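+
+        # Aside: the same matrix of transition counts (self-transitions excluded) can be built
+        # in a few lines with table(); an illustrative sketch only, where x is a vector of
+        # daily cluster indices (hypothetical name). For the simulated case it would be applied
+        # per year/member slice, since the loop above never counts transitions across slices:
+        #
+        # from <- head(x, -1); to <- tail(x, -1); ss <- from != to
+        # tm <- table(factor(from[ss], levels = 1:4), factor(to[ss], levels = 1:4))
+        # tm <- tm / rowSums(tm)   # row-normalise (diagonal is 0 here rather than NA)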
+
+
+        # compute the cluster persistence:
+        my.cluster.vector <- as.vector(my.cluster.array[[p]])  # reduce it to a vector ordered as: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+        n.dd <- length(my.cluster.vector)
+
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        pers1 <- mean(sequ1[which(sequ1 != 0)])
+        pers2 <- mean(sequ2[which(sequ2 != 0)])
+        pers3 <- mean(sequ3[which(sequ3 != 0)])
+        pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+        # compute the correlations between the simulated clusters and the observed regimes' anomalies:
+        cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+
+        if(composition == 'impact' && impact.data == TRUE) {
+          cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean
+          #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4
+        }
+
+        lat.forecast <- lat; lon.forecast <- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month }  # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData"))  # load the reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData"))  # load also the reanalysis regime names
+
+        # load the reanalysis varwrXmean impact data:
+        if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+        if(var.num != var.num.orig) var.num <- var.num.orig  # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig  # restore the original values of this script
+        if(p.orig != p) p <- p.orig
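+
+        # Aside: the run-length bookkeeping above (and its reanalysis twin further below) is
+        # exactly what rle() computes; a compact equivalent, shown only as an illustration:
+        #
+        # r <- rle(my.cluster.vector)
+        # pers <- tapply(r$lengths, factor(r$values, levels = 1:4), mean)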
+        # compute the observed transition probabilities:
+        n.days.month <- n.days.in.a.period(load.month,2001)
+        j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+        for (d in 1:(length(my.cluster$cluster)-1)){
+          if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+            j1o <- j1o + 1
+          }
+          if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+            j2o <- j2o + 1
+          }
+          if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+            j3o <- j3o + 1
+          }
+          if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+            j4o <- j4o + 1
+          }
+
+        }
+
+        trans.obs <- matrix(NA,4,4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+        trans.obs[1,2] <- length(which(transition.obs1 == 2))
+        trans.obs[1,3] <- length(which(transition.obs1 == 3))
+        trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+        trans.obs[2,1] <- length(which(transition.obs2 == 1))
+        trans.obs[2,3] <- length(which(transition.obs2 == 3))
+        trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+        trans.obs[3,1] <- length(which(transition.obs3 == 1))
+        trans.obs[3,2] <- length(which(transition.obs3 == 2))
+        trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+        trans.obs[4,1] <- length(which(transition.obs4 == 1))
+        trans.obs[4,2] <- length(which(transition.obs4 == 2))
+        trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+        trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+        for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        for(y in year.start:year.end){
+          # for both the reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+          freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+        }
+
+        sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+        sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+        sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+        sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y  # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis ones")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from the reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from the reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+        cluster.corr[1,1] <- 
cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && 
my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        persObs1 <- mean(sequ1[which(sequ1 != 0)])
+        persObs2 <- mean(sequ2[which(sequ2 != 0)])
+        persObs3 <- mean(sequ3[which(sequ3 != 0)])
+        persObs4 <- mean(sequ4[which(sequ4 != 0)])
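+
+        # The block below fixes occasional misassignments of the automatic regime selection by
+        # hard-coding the cluster names for specific (start month, lead month) pairs. The same
+        # corrections could be driven by a small lookup table; an illustrative sketch with
+        # hypothetical names only:
+        #
+        # fix <- list("9_0" = c("Blocking","Atl.Ridge","NAO-","NAO+"))
+        # key <- paste(p, lead.month, sep = "_")
+        # if(!is.null(fix[[key]])) {
+        #   cluster1.name <- fix[[key]][1]; cluster2.name <- fix[[key]][2]
+        #   cluster3.name <- fix[[key]][3]; cluster4.name <- fix[[key]][4]
+        # }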
+
+        ### insert here all the manual corrections to the regime assignation due to misassignments in the automatic selection:
+        ### forecast month: September
+        #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+        #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+        if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+        if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+        if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+        if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+        if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"}  # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+        # regimes.sim  # for the debug
+
+        ### forecast month: October
+        #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+        #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+        if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+        if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+
+        ### forecast month: November
+        #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+        regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)  # simulated regimes after the manual corrections
+        #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)  # already defined above, included only as a reference
+
+        order.sim <- match(regimes.sim2, orden)
+        order.obs <- match(regimes.obs, orden)
+
+        clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+        clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+        # recompute the spatial correlations, since the manual corrections above may have changed them:
+        spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+        spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+        spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+        spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+        if(composition == 'impact' && impact.data == TRUE) {
+          clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp)
+          #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv)
+
+          clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean)
+          #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4)
+
+          cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]]))
+          cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]]))
+          cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]]))
+          cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]]))
+
+        }
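+
+        # Aside: the repeated max/-999 masking used earlier to associate each simulated cluster
+        # with an observed regime is a greedy argmax; the same association written as a loop
+        # (an illustrative sketch only):
+        #
+        # cc <- cluster.corr; assoc <- integer(4)
+        # for(k in 1:4){
+        #   m <- which(cc == max(cc), arr.ind = TRUE)[1, ]
+        #   assoc[m[1]] <- m[2]            # simulated cluster m[1] -> observed regime m[2]
+        #   cc[m[1], ] <- -Inf; cc[, m[2]] <- -Inf
+        # }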
+
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData"))  # reload the forecast cluster time series and mean psl data
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))  # reload the mean var data for the impact maps
+
+        if(var.num != var.num.orig) var.num <- var.num.orig  # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        if(!identical(period.orig, period)) period <- period.orig
+        if(p.orig != p) p <- p.orig
+        if(identical(rev(lat),lat.forecast)) lat <- rev(lat)  # restore the right lat order
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig  # restore the original values of this script
+
+      }  # close the if on 'forecast.name'
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+        if(save.names){
+          save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+        } else {  # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+          ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")
+          if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing"))  # check whether the file exists or not
+          load(ClusterName.file)  # load the cluster names
+
+          if(var.num != var.num.orig) var.num <- var.num.orig  # restore the original values of this script
+          if(fields.name != fields.name.orig) fields.name <- fields.name.orig  # restore the original values of this script
+        }
+      }
+
+      # assign to each cluster its regime:
+      #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+      if(ordering == FALSE){
+        cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4  # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc.
+      } else {
+        cluster1 <- which(orden == cluster1.name)
+        cluster2 <- which(orden == cluster2.name)
+        cluster3 <- which(orden == cluster3.name)
+        cluster4 <- which(orden == cluster4.name)
+      }
+
+      assign(paste0("map",cluster1), pslwr1mean)
+      assign(paste0("map",cluster2), pslwr2mean)
+      assign(paste0("map",cluster3), pslwr3mean)
+      assign(paste0("map",cluster4), pslwr4mean)
+
+      assign(paste0("fre",cluster1), wr1y)
+      assign(paste0("fre",cluster2), wr2y)
+      assign(paste0("fre",cluster3), wr3y)
+      assign(paste0("fre",cluster4), wr4y)
+
+      #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1])
+      #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2])
+      #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3])
+      #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4])
+
+      if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){
+        assign(paste0("imp",cluster1), varwr1mean)
+        assign(paste0("imp",cluster2), varwr2mean)
+        assign(paste0("imp",cluster3), varwr3mean)
+        assign(paste0("imp",cluster4), varwr4mean)
+
+        assign(paste0("sig",cluster1), pvalue1)
+        assign(paste0("sig",cluster2), pvalue2)
+        assign(paste0("sig",cluster3), pvalue3)
+        assign(paste0("sig",cluster4), pvalue4)
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        assign(paste0("freMax",cluster1), wr1yMax)
+        assign(paste0("freMax",cluster2), wr2yMax)
+        assign(paste0("freMax",cluster3), wr3yMax)
+        assign(paste0("freMax",cluster4), wr4yMax)
+
+        assign(paste0("freMin",cluster1), wr1yMin)
+        assign(paste0("freMin",cluster2), wr2yMin)
+        assign(paste0("freMin",cluster3), wr3yMin)
+        assign(paste0("freMin",cluster4), wr4yMin)
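+
+        # Aside: the assign(paste0(...)) pattern above builds the variables map1..map4,
+        # fre1..fre4, imp1..imp4, etc. from the cluster order; a list indexed by the cluster
+        # position would express the same mapping without assign()/get() (an illustrative
+        # sketch only):
+        #
+        # maps <- list(); maps[[cluster1]] <- pslwr1mean; maps[[cluster2]] <- pslwr2mean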
+
+        #assign(paste0("spatial.cor",cluster1), spat.cor1)
+        #assign(paste0("spatial.cor",cluster2), spat.cor2)
+        #assign(paste0("spatial.cor",cluster3), spat.cor3)
+        #assign(paste0("spatial.cor",cluster4), spat.cor4)
+
+        assign(paste0("persist",cluster1), pers1)
+        assign(paste0("persist",cluster2), pers2)
+        assign(paste0("persist",cluster3), pers3)
+        assign(paste0("persist",cluster4), pers4)
+
+        clusters.all <- c(cluster1, cluster2, cluster3, cluster4)
+        ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4))
+
+        assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence]))
+        assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence]))
+        assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence]))
+        assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence]))
+
+        cluster1.obs <- which(orden == regimes.obs[1])
+        cluster2.obs <- which(orden == regimes.obs[2])
+        cluster3.obs <- which(orden == regimes.obs[3])
+        cluster4.obs <- which(orden == regimes.obs[4])
+
+        clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs)
+        ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4))
+
+        assign(paste0("freObs",cluster1.obs), wr1y.obs)
+        assign(paste0("freObs",cluster2.obs), wr2y.obs)
+        assign(paste0("freObs",cluster3.obs), wr3y.obs)
+        assign(paste0("freObs",cluster4.obs), wr4y.obs)
+
+        assign(paste0("persistObs",cluster1.obs), persObs1)
+        assign(paste0("persistObs",cluster2.obs), persObs2)
+        assign(paste0("persistObs",cluster3.obs), persObs3)
+        assign(paste0("persistObs",cluster4.obs), persObs4)
+
+        assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs]))
+        assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs]))
+        assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs]))
+        assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs]))
+
+      }
+
+      # positions of the longitude values covering Europe only (excluding the Atlantic Ocean and America):
+      #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon))  # restrict the area to continental Europe only
+      EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon)))  # restrict the area to continental Europe only
+
+      # breaks and colors of the geopotential fields:
+      if(psl == "g500"){
+        #my.brks <- c(-300,-199:200,300)  # % Mean anomaly of a WR
+        my.brks <- c(-300,seq(-200,200,10),300)  # % Mean anomaly of a WR
+        my.brks2 <- c(-300,seq(-200,200,10),300)  # % Mean anomaly of a WR for the contour lines
+        #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+        values.to.plot <- c(-200, -100, 0, 100, 200)  # values we want to show in the labels
+      }
+
+      if(psl == "psl"){
+        my.brks <- c(seq(-21,-1,2),0,seq(1,21,2))  # % Mean anomaly of a WR
+        # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2))  # % Mean anomaly of a WR
+
+        my.brks2 <- my.brks  #c(seq(-20,20,2))  # % Mean anomaly of a WR for the contour lines
+        values.to.plot <- my.brks  #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+      }
+
+      my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1)  # blue--white--red colors
+      my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+      # breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+      #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20)  # % Mean anomaly of a WR
+      #my.brks.var <- 
c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + 
map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), 
new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + if(composition == "edpr"){ + + # adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)+1) # blue--white--red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)+1) # blue--white--red colors + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", 
continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, 
title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime1.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime4.name, " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, 
cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4, paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4, paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+ }
+
+ # Frequency plots:
+ barplot.xpos <- 0.75
+ barplot.width <- 0.25
+
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE)
+ if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+ if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.510, 0.680), new=TRUE)
+ if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+ if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.275, 0.445), new=TRUE)
+ if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+ if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.040, 0.210), new=TRUE)
+ if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+ if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
+
+ # Title frequency plots:
+ title3.xpos <- 0.75
+ title3.width <- 0.25
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE)
+ mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression!
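+ # (aside: every panel, title and label in these compositions is placed with
+ # par(fig=c(xmin, xmax, ymin, ymax), new=TRUE), i.e. normalized device coordinates.
+ # A minimal self-contained sketch of the technique, with hypothetical data, kept
+ # commented out so it is not executed as part of the composition:
+ # png("layout_sketch.png", width=800, height=400)
+ # plot.new()
+ # par(fig=c(0, 0.5, 0, 1), new=TRUE); plot(rnorm(10), main="left half")
+ # par(fig=c(0.5, 1, 0, 1), new=TRUE); plot(rnorm(10), main="right half")
+ # dev.off() )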
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else {
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'psl'
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered"){
+ ## matrix correlation with DJF regimes:
+ #cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean
+ #cluster1.name.monthly <- cluster1.name; cluster2.name.monthly <- cluster2.name; cluster3.name.monthly <- cluster3.name; cluster4.name.monthly <- cluster4.name
+ assign(paste0("clusterMax", which(orden == cluster1.name), ".monthly"), pslwr1mean)
+ assign(paste0("clusterMax", which(orden == cluster2.name), ".monthly"), pslwr2mean)
+ assign(paste0("clusterMax", which(orden == cluster3.name), ".monthly"), pslwr3mean)
+ assign(paste0("clusterMax", which(orden == cluster4.name), ".monthly"), pslwr4mean)
+
+ ## cluster.name.monthly <- c(cluster1.name.monthly, cluster2.name.monthly, cluster3.name.monthly, cluster4.name.monthly)
+ ## max1 <- which(cluster.name.monthly == orden[1]) # get which is the monthly cluster with the highest explained variance (by default it is associated to NAO+)
+ ## max2 <- which(cluster.name.monthly == orden[2]) # get the monthly cluster with the second highest variance
+ ## max3 <- which(cluster.name.monthly == orden[3]) # ...
+ ## max4 <- which(cluster.name.monthly == orden[4]) # ...
+
+ ## max.seq <- c(max1, max2, max3, max4)
+
+ ## Load DJF data:
+ rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+
+ load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # Load mean slp DJF data from the same reanalysis
+ load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also reanalysis DJF regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name) # clusters for DJF!!! (see the match() note below)
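+ # (cluster2..cluster4 just below repeat the same name-to-position lookup; an equivalent
+ # one-liner -- only a sketch, and assuming each name occurs exactly once in 'orden' -- is:
+ # match(c(cluster1.name, cluster2.name, cluster3.name, cluster4.name), orden) )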
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("psl.ordered",cluster1), pslwr1mean) # psl for DJF
+ assign(paste0("psl.ordered",cluster2), pslwr2mean)
+ assign(paste0("psl.ordered",cluster3), pslwr3mean)
+ assign(paste0("psl.ordered",cluster4), pslwr4mean)
+
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden))
+ # pattern correlation of each monthly cluster (rows) with each DJF reference regime (columns):
+ for(i in 1:4) {
+   for(j in 1:4) {
+     cluster.corr[i,j] <- cor(as.vector(get(paste0("clusterMax",i,".monthly"))), as.vector(get(paste0("psl.ordered",j))))
+   }
+ }
+
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.08
+ map.xpos <- map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+ print(paste0("map.xpos= ", map.xpos))
+
+ par(fig=c(0, 1, 0, 1), new=TRUE)
+ # reset par to its default values, because drawing with PlotEquiMap() alters some par values:
+ if(n.map == 1) { op <- par(no.readonly = TRUE) } else { par(op) }
+
+ text.cex <- 2
+ text.ypos <- 1.03
+ text.xmod <- 0.007 * (n.map - 1)
+ text.xpos <- map.xpos + text.xmod - 0.02
+ text.width <- 0.015
+ text(x=text.xpos - text.width, y=text.ypos - 0.02, labels="cl1", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos - 0.04, labels="cl2", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos - 0.06, labels="cl3", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos - 0.08, labels="cl4", cex=text.cex)
+ text(x=text.xpos + 0*text.width, y=text.ypos + 0.00, labels="NAO+", cex=text.cex)
+ text(x=text.xpos + 1*text.width, y=text.ypos + 0.00, labels="NAO-", cex=text.cex)
+ text(x=text.xpos + 2*text.width, y=text.ypos + 0.00, labels="BLO", cex=text.cex)
+ text(x=text.xpos + 3*text.width, y=text.ypos + 0.00, labels="ATL", cex=text.cex)
+
+ # write the rounded correlation values into the 4x4 grid drawn above:
+ for(i in 1:4) {
+   for(j in 1:4) {
+     text(x=text.xpos + (j-1)*text.width, y=text.ypos - 0.02*i, labels=round(cluster.corr,2)[i,j], cex=text.cex)
+   }
+ }
+
+ ## Centroid maps:
+ ## (note that mapX == clusterMaxX.monthly, X = 1, ..., 4 by default)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ } # close if on psl.rean or on psl.rean.unordered
+
+
+ # Visualize the composition of the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10,
intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = 
FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE)
+ ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3)
+ par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE)
+ mtext("m/s",cex=3)
+
+ dev.off()
+
+ # format it manually from the bash shell (you must run it from your workstation until the 'identify' command of ImageMagick is available on the cluster):
+ #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png
+
+
+ ### to plot the impact map of all regimes on a particular month:
+ #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100
+ year.test <- 2016
+ pos.year.test <- year.test - year.start + 1
+ imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test]
+ #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032)
+ par(fig=c(0, 1, 0.05, 1), new=TRUE)
+ PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ # vector with the frequency of the WRs in the chosen month and year:
+ wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test])
+
+ ## # or save them as individual maps:
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+ }
+
+ # Save the psl pattern of each cluster as a separate figure, for a selected reanalysis or forecasted month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
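+ # (one device per cluster below; the 'WithinSS' shown in each map title is presumably
+ # the cluster's within-cluster sum of squares -- e.g. kmeans()$withinss -- divided by
+ # 10^6; this is an interpretation, as the withinssX objects are computed elsewhere)
+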
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 
100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 
'fre'
+
+ # save indicators for each startdate and forecast time:
+ # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector)
+ if(fields.name == forecast.name) {
+ if (composition != "impact") {
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ } else { # also save the impX objects
+ save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, cor.imp1, cor.imp2, cor.imp3, cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+ }
+ }
+
+ } # close 'p' on 'period'
+
+ if((composition == 'simple' || composition == 'edpr') && as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+ } # close 'lead.month' on 'lead.months'
+
+ if(composition == "psl" || composition == "fre" || composition == "impact"){
+ # Regime names:
+ title1.xpos <- 0.01
+ title1.width <- 0.04
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE)
+ mtext(paste0(regime1.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE)
+ mtext(paste0(regime2.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE)
+ mtext(paste0(regime3.name), font=2, cex=5)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE)
+ mtext(paste0(regime4.name), font=2, cex=5)
+
+ if(composition == "psl") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ mtext("hPa", cex=legend1.cex*1.3)
+ }
+
+ if(composition == "impact") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ #my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE)
+ ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(var.unit[var.num], cex=legend1.cex*1.3)
+
+ }
+
+ dev.off()
+
+ } # close if on composition
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered") dev.off()
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+
+
+#} # close for on forecasts.month
+
+
+
+if(composition == "taylor"){
+ library("plotrix")
+
+ fields.name="ERA-Interim"
+
+ # choose a reference season from 13 (winter) to 16 (autumn):
+ season.ref <- 13
+
+ # set the first WR classification:
+ rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+ # load data of the reference season of the first classification (this line should be modified to load the chosen season):
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ #cluster1.ref <- cluster1.name.period[13]
+ #cluster2.ref <- cluster2.name.period[13]
+ #cluster3.ref <- cluster3.name.period[13]
+ #cluster4.ref <- cluster4.name.period[13]
+
+ cluster1.ref <- 3 # bypass the previous values, because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+ # set a second WR classification (e.g. with running clusters) to contrast with the first one:
+ #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+ rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+ rean2.name <- "ERA-Interim"
+
+ # load data of the reference season of the second classification (this line should be modified to load the chosen season):
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ #cluster1.name.ref <- cluster1.name.period[season.ref]
+ #cluster2.name.ref <- cluster2.name.period[season.ref]
+ #cluster3.name.ref <- cluster3.name.period[season.ref]
+ #cluster4.name.ref <- cluster4.name.period[season.ref]
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ cluster1.ref <- 3 # bypass the previous values, because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
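+ # (the remaining three overrides follow just below; together they hard-code the
+ # permutation c(3,2,1,4), so pslwr1mean..pslwr4mean end up in map.old.ref3,
+ # map.old.ref2, map.old.ref1 and map.old.ref4 respectively)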
+ cluster2.ref <- 2
+ cluster3.ref <- 1
+ cluster4.ref <- 4
+
+ assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+
+ assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO-
+ assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking
+ assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge
+
+
+ # breaks and colors of the geopotential fields:
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ # plot the composition with the regime anomalies of each monthly regime:
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2)
+
+ n.map <- 0
+ for(p in c(9:12,1:8)){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.month.short[p], cex=5.5, font=2)
+
+ }
+
+ # draw the last map to the right of the composition, the one with the seasonal anomalies:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.07
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # subtitle:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ mtext(my.period[season.ref], cex=5.5, font=2)
+
+ dev.off()
+
+ # Plot the Taylor diagrams:
+
+ corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12))
+
+ for(regime in 1:4){
+
+ png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800)
+
+ for(p in 1:12){
+ p.orig <- p
+
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("map",cluster1), pslwr1mean) # NAO+
+ assign(paste0("map",cluster2), pslwr2mean) # NAO-
+ assign(paste0("map",cluster3), pslwr3mean) # Blocking
+ assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge
+
+ # load data from the second classification:
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data
+ load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ cluster1.old <- which(orden == cluster1.name)
+ cluster2.old <- which(orden == cluster2.name)
+ cluster3.old <- which(orden == cluster3.name)
+ cluster4.old <- which(orden == cluster4.name)
+
+ assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+
+ assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO-
+ assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking
+ assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge
+
+ add.mod <- ifelse(p == 1, FALSE, TRUE)
+ main.mod <- ifelse(p == 1, orden[regime],"")
+
+ color.first <- "red"
+ color.second <- "blue" # "black"
+ sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work!
+ if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+ if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod)
+
+ # add the points from the second classification:
+ add.mod <- TRUE
+ #add.mod <- ifelse(p == 1, FALSE, TRUE)
+
+ if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+ if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second)
+
+ if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) }
+ if(regime == 2) { corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) }
+ if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) }
+ if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) }
+
+ if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) }
+ if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) }
+ if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) }
+ if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) }
+
+ } # close for on 'p'
+
+ dev.off()
+
+ } # close for on 'regime'
+
+ corr.first.mean <- apply(corr.first,1,mean)
+ corr.second.mean <- apply(corr.second,1,mean)
+ corr.diff <- corr.second.mean - corr.first.mean
+
+ rmse.first.mean <- apply(rmse.first,1,mean)
+ rmse.second.mean <- apply(rmse.second,1,mean)
+ rmse.diff <- rmse.second.mean - rmse.first.mean
+
+} # close if on composition == "taylor"
+
+
+
+
+
+
+# Summary graphs:
+if(composition == "summary" && fields.name == forecast.name){
+ # array storing correlations in the format: [startdate, leadmonth, regime]
+ array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4))
+ array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4))
+ array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4))
+ array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4))
+
+ for(p in 1:12){
+ p.orig <- p
+
+ for(l in 0:6){
+
+ load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(p != p.orig) p <- p.orig
+
+ array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left)
+ array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right)
+ array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one)
+ array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left)
+ array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right)
+ array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one)
+ array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one)
+
+ array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+
+ array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO-
+ array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) #
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
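+ # (transSim1..4 / transObs1..4, loaded above from the ClusterNames .RData files, are
+ # length-4 vectors with the probabilities of moving from one regime into each of the
+ # four regimes. As a purely illustrative sketch -- 'wt' is a hypothetical daily regime
+ # series of integers 1..4, not a variable of this script -- such a matrix could be
+ # estimated with:
+ # trans <- table(factor(head(wt,-1), levels=1:4), factor(tail(wt,-1), levels=1:4))
+ # trans <- trans / rowSums(trans) # row i: transition probabilities out of regime i
+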
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1,
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1,0.5+p-1,1+p-1), c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2])
+     polygon(c(1+p-1,1.5+p-1,1.5+p-1), c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3])
+     polygon(c(0.5+p-1,1+p-1,1.5+p-1), c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2)  # paste0() keeps the variable name inside the title string; a bare second argument would be taken as 'side'
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ", var.name[var.num]), cex=1.2)  # same paste0() form as the startdate figure above
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Simulated transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
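+ # [editor's note] The eight threshold assignments above are repeated verbatim for
+ # each sim/obs transition array below, and my.seq / trans.cols are redefined
+ # identically each time. A small helper based on base R's findInterval() would
+ # reproduce the same binning (values below my.seq[2] get the first colour, values
+ # at or above my.seq[8] the last, NA turns white); bin2col() is a hypothetical
+ # name, a sketch rather than part of the original script:
+ #
+ # bin2col <- function(x, brks, cols, na.col = "white") {
+ #   out <- array(cols[findInterval(x, brks, all.inside = TRUE)], dim = dim(x))
+ #   out[is.na(x)] <- na.col   # keep the NA -> white convention used below
+ #   out
+ # }
+ # array.trans.sim1.colors <- bin2col(array.trans.sim1, my.seq, trans.cols)
+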
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5)  # a single title, matching the other transition figures
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l,
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
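# [editor's note] In these target-month panels, i indexes the target month (y axis)
+ # and j the drawn column; the x axis is labelled 6:0, so column j shows lead time
+ # 6-j. Hence i2 = i-(6-j) recovers the startdate month, and the +12 wrap moves
+ # startdates that fall in the previous calendar year back into the 1-12 range. +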
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
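# [editor's note] In the single-panel summary maps above (the *_summary_*.png files
+ # without a _startdate or _target_month suffix), each startdate x lead-time cell is
+ # split into four triangles drawn from layers 1-4 of the colour array; matching the
+ # panel titles used in this block, that is left = NAO+ (layer 1), bottom = Atlantic
+ # Ridge (2), right = NAO- (3) and top = Blocking (4). +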
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
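# [editor's sketch] Every *_startdate figure in this block repeats the same panel
+ # four times, changing only the regime title, the colour layer and the fig region;
+ # a loop over a small table would avoid the duplication. draw.panel() is a
+ # hypothetical wrapper around the par/plot/mtext/axis/polygon sequence used here:
+ # panels <- data.frame(title = c("NAO+","Blocking","NAO-","Atlantic Ridge"),
+ #                      layer = c(1,4,3,2), x0 = c(0,0,0.45,0.45), x1 = c(0.45,0.45,0.9,0.9),
+ #                      y0 = c(0.6,0.12,0.6,0.12), y1 = c(0.9,0.42,0.9,0.42))
+ # for(k in 1:4) draw.panel(array.trans.obs2.colors, panels[k,]) +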
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
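+# The i2 index below maps (target month i, column j) back to the startdate month:
+# the lead time is 6-j, so i2 = i - (6-j), wrapped into 1..12 whenever it drops to
+# zero or below (equivalently, i2 = ((i - (6-j) - 1) %% 12) + 1).
+# Worked example: target month i=2 (Feb) at lead time 5 (j=1) comes from startdate
+# month i2 = 2 - 5 + 12 = 9 (Sep). Quick sanity check, kept inside if(FALSE) so it never runs:
+if(FALSE){
+  i <- 2; j <- 1
+  stopifnot(ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) == 9)
+}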
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + 
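+# Each (startdate p, lead time l) cell of this summary plot is split into four
+# triangles (left, bottom, right and top), filled from slices 1..4 of the colour
+# array; judging from the panel titles of the startdate figures, those slices
+# correspond to the transitions towards NAO+, Atlantic Ridge, NAO- and Blocking.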
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
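+# Note: the eight threshold assignments repeated above for every array.trans.*
+# object could be collapsed into a single findInterval()-based helper; a minimal
+# sketch follows ('val2col' is a hypothetical name, not used elsewhere in this script):
+val2col <- function(x, brks, cols) {
+  # pick the colour of the interval each value of x falls into; NAs are drawn in white
+  out <- array(cols[findInterval(x, brks, all.inside=TRUE)], dim=dim(x))
+  out[is.na(x)] <- "white"
+  out
+}
+# e.g.: array.trans.obs4.colors <- val2col(array.trans.obs4, my.seq, trans.cols)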
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
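+# Note: in the target-month layout the x-axis runs from lead time 6 (left) to 0
+# (right); hence labels=6:0 on axis(1) and the 1+6-j index into the colour arrays.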
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max <- 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), 
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + + + + + + + + + + + + + +if(monthly_anomalies) { + # Compute the climatology and the anomalies (with a LOESS filter) of the sfcWind data, and draw the monthly anomalies, to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed in the reanalysis: + + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period, removing the 29th of February + } + + rm(sfcWind366) + gc() + + # LOESS anomalies: + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + + 
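+# Minimal self-contained illustration of the LOESS smoothing applied above to the
+# daily climatology of each grid point (synthetic data; wrapped in if(FALSE) so it
+# is never executed as part of this script):
+if(FALSE){
+  day <- 1:365
+  clim.raw <- sin(2*pi*day/365) + rnorm(365, sd=0.3)        # noisy annual cycle
+  clim.smooth <- predict(loess(clim.raw ~ day, span=0.35))  # same span as above
+  plot(day, clim.raw, type="l", col="grey"); lines(day, clim.smooth, lwd=2)
+}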
+  rm(sfcWindClimDaily)
+  gc()
+
+  sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+  sfcWindAnom <- sfcWind - sfcWindClim2
+
+  rm(sfcWindClimLoess)
+  gc()
+
+  # same as above, but computing the climatology and anomalies (with a LOESS filter) for the psl data, to draw the monthly slp anomalies:
+  slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  slp <- slp366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # in leap years, shift March-December back one day to drop the 29th of February
+  }
+
+  rm(slp366)
+  gc()
+
+  slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T)
+
+  slpClimLoess <- slpClimDaily
+  for(i in n.pos.lat){
+    for(j in n.pos.lon){
+      my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      slpClimLoess[,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(slpClimDaily)
+  gc()
+
+  slpClim2 <- InsertDim(slpClimLoess, 1, n.years)
+  slpAnom <- slp - slpClim2
+
+  rm(slpClimLoess)
+  gc()
+
+  if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP from Pa to hPa
+
+  EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+  my.brks.var <- c(-20,seq(-3,3,0.5),20)
+  my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+  # same but for slp:
+  my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100)
+  my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors
+  my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white"
+
+  orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+  # save all monthly anomaly maps:
+  for(year.test in year.chosen){
+    for(month.test in month.chosen){
+      #year.test <- 2016; month.test <- 10 # for the debug
+
+      pos.year.test <- year.test - year.start + 1
+
+      # wind anomaly for the chosen year and month:
+      sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,]
+
+      sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+      # psl anomaly for the chosen year and month:
+      slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,]
+
+      slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+      fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png")
+
+      png(filename=fileoutput,width=2800,height=1000)
+
+      par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE)
+      #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+      PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+      #my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE)
+      #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+      #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)])
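+      # note on the ColorBar() call below: the outermost breaks (-20 and 20) are dropped from
+      # 'brks'/'cols' and their colours passed through col_inf/col_sup, so the legend is drawn with
+      # open-ended triangles at both ends; generic sketch of the pattern (hypothetical breaks/colours):
+      if(FALSE){ # illustration only, not executed
+        brks <- c(-20, seq(-3,3,0.5), 20); cols <- rainbow(length(brks)-1)
+        ColorBar(brks=brks[2:(length(brks)-1)], cols=cols[2:(length(cols)-1)], vert=FALSE,
+                 bar_limits=c(brks[2], brks[length(brks)-1]),
+                 col_inf=cols[1], col_sup=cols[length(cols)], subsample=1)
+      }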
+      ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1)
+
+      par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE)
+      mtext("m/s", cex=1.8)
+
+      par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE)
+
+      PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1)
+
+      # you could almost use the normal PlotEquiMap function below; it only needs the black line at anomaly=0 and smaller contour labels:
+      # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5)
+
+      ##my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.37, 1, 0, 0.09), new=TRUE)
+      #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+      ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2)
+
+      par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE)
+      mtext("hPa", cex=1.8)
+
+      #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
+      #mtext("0", cex=1.8)
+
+      dev.off()
+
+      # same image formatted for the catalogue:
+      fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2))
+
+
+      # mean wind speed and slp anomalies, restricted to the days of the chosen year and month:
+      load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence
+
+      # arrange the regime sequence in an array, to separate months from years:
+      cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years))
+      #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP
+
+      cluster.test <- cluster2D[,pos.year.test]
+
+      fre1.days <- length(which(cluster.test == 1))
+      fre2.days <- length(which(cluster.test == 2))
+      fre3.days <- length(which(cluster.test == 3))
+      fre4.days <- length(which(cluster.test == 4))
+
+      # wind anomaly for the chosen year, month and cluster:
+      sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+      sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+      sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+      sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+      # in case a regime has NO days in that month, we must set it to NA:
+      if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+
+      sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE)
+
+      # load regime names:
+      load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData"))
+
+      ## add strip with daily sequence of WRs:
+
+      mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name))
+      mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name))
+      mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name))
+      mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name))
+
+      cluster1.name.short <- substr(cluster1.name,1,1)
+      cluster2.name.short <- substr(cluster2.name,1,1)
+      cluster3.name.short <- substr(cluster3.name,1,1)
+      cluster4.name.short <- substr(cluster4.name,1,1)
+
+      ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime:
+      if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1)
+      if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2)
+      if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3)
+      if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4)
+
+      c1 <- which(cluster.test == 1)
+      c2 <- which(cluster.test == 2)
+      c3 <- which(cluster.test == 3)
+      c4 <- which(cluster.test == 4)
+
+      cluster.test.letters <- cluster.test
+      cluster.test.letters[c1] <- cluster1.name.short
+      cluster.test.letters[c2] <- cluster2.name.short
+      cluster.test.letters[c3] <- cluster3.name.short
+      cluster.test.letters[c4] <- cluster4.name.short
+
+      cluster.col <- cluster.test.letters
+      cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1"
+      cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1"
+      cluster.col[which(cluster.test.letters == "B")] <- "White"
+      cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1"
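+      # the four replacements above implement a manual colour lookup; the same mapping can be
+      # written as a named vector (sketch, same labels and colours as above):
+      if(FALSE){ # illustration only, not executed
+        col.map <- c("N+"="Firebrick1", "N-"="Dodgerblue1", "B"="White", "A"="Darkgoldenrod1")
+        cluster.col <- unname(col.map[cluster.test.letters])
+      }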
+
+      cluster1 <- which(orden == cluster1.name)
+      cluster2 <- which(orden == cluster2.name)
+      cluster3 <- which(orden == cluster3.name)
+      cluster4 <- which(orden == cluster4.name)
+
+      assign(paste0("fre.days",cluster1), fre1.days)
+      assign(paste0("fre.days",cluster2), fre2.days)
+      assign(paste0("fre.days",cluster3), fre3.days)
+      assign(paste0("fre.days",cluster4), fre4.days)
+
+      assign(paste0("fre",cluster1), wr1y)
+      assign(paste0("fre",cluster2), wr2y)
+      assign(paste0("fre",cluster3), wr3y)
+      assign(paste0("fre",cluster4), wr4y)
+
+      assign(paste0("imp.test",cluster1), sfcWindMean1)
+      assign(paste0("imp.test",cluster2), sfcWindMean2)
+      assign(paste0("imp.test",cluster3), sfcWindMean3)
+      assign(paste0("imp.test",cluster4), sfcWindMean4)
+
+      # psl anomaly for the chosen year, month and cluster:
+      slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+      slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+      slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+      slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+      # in case a regime has NO days in that month, we must set it to NA:
+      if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+
+      slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE)
+      slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE)
+      slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE)
+      slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE)
+
+      assign(paste0("psl.test",cluster1), slpMean1)
+      assign(paste0("psl.test",cluster2), slpMean2)
+      assign(paste0("psl.test",cluster3), slpMean3)
+      assign(paste0("psl.test",cluster4), slpMean4)
+
+      # save the strip with the daily regime series for the chosen month and year:
+      fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png")
+      png(filename=fileoutput.seq,width=1500,height=1850)
+
+      plot.new()
+
+      sep <- 0.03
+      for(day in 1:n.days.in.a.period(month.test, 2001)){
+        sep.cum <- (day-1)*sep
+        polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day])
+        text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5)
+        text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=cluster.test.letters[day], cex=2)
+      }
+
+      dev.off()
+
+
+      # save the average impact and sea level pressure only for the chosen month and year:
+      fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png")
+
+      png(filename=fileoutput.test,width=1500,height=2000)
+
+      plot.new()
+
+      par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE)
+      PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE)
+      PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE)
+      PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE)
+      PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+
+      if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden}
+
+      par(fig=c(0,0.33,0.955,0.975), new=TRUE)
+      mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2)
+      par(fig=c(0,0.33,0.725,0.745), new=TRUE)
+      mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2)
+      par(fig=c(0,0.33,0.495,0.515), new=TRUE)
+      mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2)
+      par(fig=c(0,0.33,0.265,0.285), new=TRUE)
+      mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2)
+
+      par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE)
+      #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- 
paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png")
+
+      system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut away the lower part of the strip
+      system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both))
+
+
+      ## same image formatted for the catalogue:
+      fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test))
+
+      system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput))
+
+
+    } # close for on month.test
+  } # close for on year.test
+
+
+} # close if on monthly_anomalies
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v28.R b/old/weather_regimes_maps_v28.R
new file mode 100644
index 0000000000000000000000000000000000000000..c8838512cb040fc57b141ec56dd3f0c33f851da3
--- /dev/null
+++ b/old/weather_regimes_maps_v28.R
@@ -0,0 +1,5801 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+#
+# I/O: input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffix "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# Note that this script doesn't need to be parallelized, because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: you need to have created beforehand the '_psl.RData' files, which are the output of 'weather_regimes.R', for each period you want to visualize.
+# If your regimes derive from a reanalysis, this script must be run twice:
+# first, only one month/season at a time, with 'period=X' (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"',
+# inserting the regime names 'clusterX.name = ...' in the correct order; in this first run, you only save the ordered cartography.
+# You already have to know which is the right regime order, by taking a look at the output maps (_clusterX.png) of weather_regimes.R
+# After that, any time you run this script, remember to set:
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean"
+#
+# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# Then, you have to run this script again to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis can be employed
+# to automatically associate each simulated cluster with one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+#library("corrplot")
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/57_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_ordered4variance"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/58_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days_ordered4variance"
+
+rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- rean.name #forecast.name # choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+composition <- "edpr" # choose which kind of composition you want to plot:
+                      # 'none' doesn't plot anything; it only associates the clusters to the regimes with the manual association in the rows below
+                      #    and saves them in the ClusterName.Rdata files for each period selected, overwriting any pre-existing files.
+                      # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                      # 'psl' for all the regime anomalies for a fixed forecast month
+                      # 'fre' for all the interannual frequencies for a fixed forecast month
+                      # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+                      # 'summary' for the summary plots of all forecast months (persistence bias, freq. bias, correlations, etc.) [you need all the ClusterNames files]
+                      # 'impact.highest' for the impact plot of the regime with the highest impact
+                      # 'single.impact' to save the four impact maps in a 2x2 composition
+                      # 'single.psl' to save the individual psl map
+                      # 'single.fre' to save the individual fre map
+                      # 'taylor' to plot the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+                      # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps
+                      # 'psl.rean' to plot all the regime anomalies and the correlation matrix of all months of a reanalysis with the DJF regime anomalies of the same reanalysis
+                      # 'psl.rean.unordered' as before, but without ordering the regimes following the order in vector 'orden'
+                      # 'variance' as 'none', but instead of associating clusters to regimes, it reorders the clusters in decreasing order of explained variance
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple' or 'edpr', choose a variable for the impact maps; 1: sfcWind, 2: tas
+
+# Choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12
+
+# Manually associate the four clusters with the four regimes, one period at a time (only in case composition="none"):
+cluster4.name <- "NAO+"
+cluster1.name <- "NAO-"
+cluster2.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4"
+                   # (when composition='edpr' or monthly_anomalies = TRUE)
+
+####### if monthly_anomalies <- TRUE, you have to specify these additional parameters:
+monthly_anomalies <- FALSE # if TRUE, also save maps with the monthly wind speed and slp anomalies over Europe for the years and months chosen below
+year.chosen <- 2016
+month.chosen <- 1:12
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.data <- JRA55 # choose one of the above reanalyses
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+                      # (and variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl', 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition;
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
+#for(forecast.month in 1:12){
+
+####### Derived variables ###############################################################################################################################################
+
+ordering <- TRUE # if TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- FALSE # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- FALSE # FALSE: save results as one .png file for each season/month; TRUE: save only one .pdf file with all seasons/months
+mean.freq <- TRUE # if TRUE, when composition is "simple" or 'edpr', it also adds the mean frequency value over each frequency plot
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden}
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+var.num.orig <- var.num # when loading the _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+work.dir.orig <- work.dir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+n.map <- 0
+
+############################################################################################
+##                                  Start analysis                                        ##
+############################################################################################
+
+if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){
+
+  if(composition == "psl" || composition == "fre" || composition == "impact") {
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+    if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+    lead.months <- c(6:0,0)
+    plot.new()
+
+    # Sheet title:
+    par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+    if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+    if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2)
+
+  } # close if on composition == "psl", "fre" or "impact"
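+  # every composition in this script is assembled on one large device with par(fig=c(x1,x2,y1,y2),
+  # new=TRUE), placing each panel in normalised device coordinates; minimal self-contained sketch
+  # of the pattern ('demo_composition.png' is a hypothetical file name):
+  if(FALSE){ # illustration only, not executed
+    png("demo_composition.png", width=800, height=400)
+    plot.new()
+    par(fig=c(0.0, 0.5, 0.1, 0.9), new=TRUE); plot(1:10, main="left panel")
+    par(fig=c(0.5, 1.0, 0.1, 0.9), new=TRUE); plot(10:1, main="right panel")
+    par(fig=c(0.0, 1.0, 0.9, 1.0), new=TRUE); mtext("sheet title", font=2)
+    dev.off()
+  }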
+
+  if(composition == "none") {
+    ordering <- TRUE
+    save.names <- TRUE
+    as.pdf <- FALSE
+  }
+
+  if(composition == "variance"){
+    ordering <- TRUE
+    save.names <- TRUE
+    as.pdf <- FALSE
+    period <- 1:12
+  }
+
+  if(composition == "psl.rean") {
+    ordering <- TRUE
+    period <- c(9:12, 1:8) # to start from September instead of January
+    png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000)
+    plot.new()
+  }
+
+  if(composition == "psl.rean.unordered") {
+    ordering <- FALSE
+    period <- c(9:12, 1:8) # to start from September instead of January
+    png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000)
+    plot.new()
+  }
+
+  ## if(composition == "corr.matrix") {
+  ##   ordering <- FALSE # set it to TRUE if you want to see the correlation matrix of the ordered clusters instead!!!
+  ##   period <- c(9:12, 1:8) # to start from September instead of January
+  ##   png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000)
+  ##   plot.new()
+  ## }
+
+  if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once!
+
+  for(lead.month in lead.months){
+    #lead.month=lead.months[1] # for the debug
+
+    if(composition == "simple" || composition == 'edpr'){
+      if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+      if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+    }
+
+    if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+      period <- forecast.month - lead.month
+      if(period < 1) period <- period + 12
+    }
+
+
+    for(p in period){
+      #p <- period[1] # for the debug
+      p.orig <- p
+
+      if(fields.name == rean.name) print(paste("p =",p))
+      if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+      # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+      impact.data <- FALSE
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+        # load the impact data only if it is available; if not, it will leave an empty space in the composition:
+        if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print(paste0("Impact data for variable ",var.name[var.num]," available for reanalysis ", rean.name))
+          load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+        } else {
+          impact.data <- FALSE
+          print(paste0("Impact data for variable ",var.name[var.num]," not available for reanalysis ", rean.name))
+        }
+
+        if(composition == "variance"){
+          my.cluster2 <- my.cluster # create a copy of my.cluster
+
+          ss1 <- which(my.cluster$cluster == 1)
+          ss2 <- which(my.cluster$cluster == 2)
+          ss3 <- which(my.cluster$cluster == 3)
+          ss4 <- which(my.cluster$cluster == 4)
+
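+          # the block below ranks the four clusters by within-cluster variance, extracting one
+          # maximum at a time; a compact equivalent (sketch, assuming my.cluster$withinss has length 4):
+          if(FALSE){ # illustration only, not executed
+            max.seq <- order(my.cluster$withinss, decreasing=TRUE) # clusters by decreasing variance
+            for(k in 1:4) assign(paste0("cluster", max.seq[k], ".name"), orden[k])
+          }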
+          withinss <- my.cluster$withinss
+          max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # first cluster with maximum variance
+          withinss[max1] <- NA
+
+          max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # second cluster with maximum variance
+          withinss[max2] <- NA
+
+          max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # third cluster with maximum variance
+          withinss[max3] <- NA
+
+          max4 <- which(!is.na(withinss))
+          rm(withinss)
+
+          # vector whose first element tells you which cluster has the maximum variance, whose second element tells you which cluster has the
+          # second maximum variance, and so on:
+          max.seq <- c(max1, max2, max3, max4)
+
+          assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+)
+          assign(paste0("cluster",max2,".name"), orden[2])
+          assign(paste0("cluster",max3,".name"), orden[3])
+          assign(paste0("cluster",max4,".name"), orden[4])
+
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
+        if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for forecasts")
+          if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for the impact maps
+        } else {
+          impact.data <- FALSE
+          print("Impact data for forecasts not available")
+        }
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+        # compute the simulated interannual monthly variability:
+        n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
+        freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+        freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+        freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+        freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+        sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+        sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+        sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+        sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
+        # compute the transition probabilities:
+        j1 <- j2 <- j3 <- j4 <- 1
+        transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+        n.members <- dim(my.cluster.array[[p]])[3]
+
+        for(m in 1:n.members){
+          for(y in 1:n.years){
+            for (d in 1:(n.days.month-1)){
+
+              if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){
+                transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+                j1 <- j1 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){
+                transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+                j2 <- j2 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){
+                transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+                j3 <- j3 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){
+                transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+                j4 <- j4 + 1
+              }
+
+            }
+          }
+        }
+
+        trans.sim <- matrix(NA, 4, 4, dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+        trans.sim[1,2] <- length(which(transition1 == 2))
+        trans.sim[1,3] <- length(which(transition1 == 3))
+        trans.sim[1,4] <- length(which(transition1 == 4))
+
+        trans.sim[2,1] <- length(which(transition2 == 1))
+        trans.sim[2,3] <- length(which(transition2 == 3))
+        trans.sim[2,4] <- length(which(transition2 == 4))
+
+        trans.sim[3,1] <- length(which(transition3 == 1))
+        trans.sim[3,2] <- length(which(transition3 == 2))
+        trans.sim[3,4] <- length(which(transition3 == 4))
+
+        trans.sim[4,1] <- length(which(transition4 == 1))
+        trans.sim[4,2] <- length(which(transition4 == 2))
+        trans.sim[4,3] <- length(which(transition4 == 3))
+
+        trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+        for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
+
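+        # the nested loops above record, for every day, the regime of the following day whenever it
+        # changes; the same row-normalised transition matrix can be built with table() on the lagged
+        # series (sketch on a toy sequence 'x'):
+        if(FALSE){ # illustration only, not executed
+          x <- c(1,1,2,3,3,4,1) # toy daily cluster sequence
+          tr <- table(factor(head(x,-1), 1:4), factor(tail(x,-1), 1:4))
+          diag(tr) <- NA # drop self-transitions, as above
+          tr <- tr / rowSums(tr, na.rm=TRUE) # row-normalised transition probabilities
+        }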
+        # compute cluster persistence:
+        my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector ordered as: day1year1member1, day2year1member1, ..., day1year2member1, day2year2member1, ...
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        n.dd <- length(my.cluster.vector)
+
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        pers1 <- mean(sequ1[which(sequ1 != 0)])
+        pers2 <- mean(sequ2[which(sequ2 != 0)])
+        pers3 <- mean(sequ3[which(sequ3 != 0)])
+        pers4 <- mean(sequ4[which(sequ4 != 0)])
+
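+        # the sequ/i bookkeeping above computes the mean length of consecutive runs (spells) of each
+        # cluster; base R's rle() yields the same spell lengths directly (sketch on a toy sequence):
+        if(FALSE){ # illustration only, not executed
+          x <- c(1,1,1,2,4,4,1,1) # toy daily cluster sequence
+          r <- rle(x)
+          pers <- tapply(r$lengths, r$values, mean) # mean spell length per cluster
+        }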
+        # compute the correlations between the simulated clusters and the observed regimes' anomalies:
+        cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+
+        if(composition == 'impact' && impact.data == TRUE) {
+          cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean
+          #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4
+        }
+
+        lat.forecast <- lat; lon.forecast <- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load the reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # also load the reanalysis regime names
+
+        # load the reanalysis varwrXmean impact data:
+        if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+        if(p.orig != p) p <- p.orig
+
+        # compute the observed transition probabilities:
+        n.days.month <- n.days.in.a.period(load.month,2001)
+        j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+        for (d in 1:(length(my.cluster$cluster)-1)){
+          if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+            j1o <- j1o + 1
+          }
+          if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+            j2o <- j2o + 1
+          }
+          if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+            j3o <- j3o + 1
+          }
+          if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+            j4o <- j4o + 1
+          }
+
+        }
+
+        trans.obs <- matrix(NA, 4, 4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+        trans.obs[1,2] <- length(which(transition.obs1 == 2))
+        trans.obs[1,3] <- length(which(transition.obs1 == 3))
+        trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+        trans.obs[2,1] <- length(which(transition.obs2 == 1))
+        trans.obs[2,3] <- length(which(transition.obs2 == 3))
+        trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+        trans.obs[3,1] <- length(which(transition.obs3 == 1))
+        trans.obs[3,2] <- length(which(transition.obs3 == 2))
+        trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+        trans.obs[4,1] <- length(which(transition.obs4 == 1))
+        trans.obs[4,2] <- length(which(transition.obs4 == 2))
+        trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+        trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+        for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        for(y in year.start:year.end){
+          # for both the reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+          freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+        }
+
+        sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+        sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+        sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+        sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+        cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+        cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+        cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+        cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+        cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+        cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+        cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+        cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+        cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+        cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+        cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+        cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+        cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+        cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+        cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+        cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+        # associate each simulated cluster to one observed regime:
+        max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+        cluster.corr2 <- cluster.corr
+        cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+        cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+        max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+        cluster.corr3 <- cluster.corr2
+        cluster.corr3[max2[1],] <- -999
+        cluster.corr3[,max2[2]] <- -999
+        max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+        cluster.corr4 <- cluster.corr3
+        cluster.corr4[max3[1],] <- -999
+        cluster.corr4[,max3[2]] <- -999
+        max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+        seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+        seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+        cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+        cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+        cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+        cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+        cluster1.name <- regimes.obs[cluster1.regime]
+        cluster2.name <- regimes.obs[cluster2.regime]
+        cluster3.name <- regimes.obs[cluster3.regime]
+        cluster4.name <- regimes.obs[cluster4.regime]
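+        # the row/column elimination above pairs clusters and regimes greedily (best correlation
+        # first); an optimal one-to-one pairing could instead use the Hungarian method - hedged
+        # sketch, assuming the 'clue' package is available (solve_LSAP needs non-negative entries):
+        if(FALSE){ # illustration only, not executed
+          library(clue)
+          pairing <- solve_LSAP(cluster.corr - min(cluster.corr), maximum=TRUE)
+          # pairing[i] = index of the observed regime assigned to simulated cluster i
+        }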
+
+        regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+        cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs))
+
+        spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
+        spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+        spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+        spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+        # measure persistence for the reanalysis dataset:
+        my.cluster.vector <- my.cluster[[1]]
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] != 4) i4 <- 0
+
+        n.dd <- length(my.cluster.vector)
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        persObs1 <- mean(sequ1[which(sequ1 != 0)])
+        persObs2 <- mean(sequ2[which(sequ2 != 0)])
+        persObs3 <- mean(sequ3[which(sequ3 != 0)])
+        persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+        ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+        ### forecast month: September
+        #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+        #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+        if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+        if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+        if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+        if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+        if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+        # regimes.sim # for the debug
+
+        ### forecast month: October
+        #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+        #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+        if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+        if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+        if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+        if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+
+        ### forecast month: November
+        #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+        #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+        regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # simulated regimes after the manual correction
+        #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference
+
+        order.sim <- match(regimes.sim2, orden)
+        order.obs <- match(regimes.obs, orden)
+
+        clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+        clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+        # recompute the spatial correlations, since they may have changed after the manual corrections above:
+        spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+        spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+        spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+        spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+        if(composition == 'impact' && impact.data == TRUE) {
+          clusters.sim.imp <- list(cluster1.sim.imp,
cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp)
+          #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv)
+
+          clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean)
+          #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4)
+
+          cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]]))
+          cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]]))
+          cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]]))
+          cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]]))
+
+        }
+
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload the forecast cluster time series and mean psl data
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload the mean var data for the impact maps
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig, period)) period <- period.orig
+        if(p.orig != p) p <- p.orig
+        if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+      } # close if on 'fields.name == forecast.name'
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+        if(save.names){
+          save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+        } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+          ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")
+          if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check that the file exists
+          load(ClusterName.file) # load cluster names
+
+          if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+          if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        }
+      }
+
+      # assign to each cluster its regime:
+      #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+      if(ordering == FALSE){
+        cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc.
+ } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + 
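+ ## Illustration for the debug (commented out): the assign(paste0(...)) calls above store each object under the position of its regime in 'orden', while 'ordered.sequence' permutes the columns of the transition matrix from cluster order to regime order. A minimal sketch with toy objects (all the 'ex.*' names are hypothetical and not used elsewhere in this script):
+ ## ex.clusters.all <- c(2, 4, 1, 3)                   # cluster k corresponds to regime ex.clusters.all[k]
+ ## ex.ordered.sequence <- match(1:4, ex.clusters.all) # same as c(which(ex.clusters.all == 1), ..., which(ex.clusters.all == 4))
+ ## ex.trans <- matrix(1:16, 4, 4)                     # toy transition matrix, rows/columns in cluster order
+ ## ex.trans[1, ex.ordered.sequence]                   # transitions of cluster 1, reordered as regime 1, ..., regime 4
+ ## assign(paste0("transEx", ex.clusters.all[1]), ex.trans[1, ex.ordered.sequence]) # stored under the regime position of cluster 1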
+ assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA 
<- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## 
#lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(orden[1],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(orden[2],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(orden[3],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(orden[4],": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, 
cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(orden[4], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) 
barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(orden[4], " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + 
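+ # All the compositions of this script place their panels on a single device with par(fig=c(xmin, xmax, ymin, ymax), new=TRUE), the four values being fractions of the device size in [0,1]. A minimal standalone sketch of this layout technique, commented out (the output file name is hypothetical):
+ ## png("layout_example.png", width=600, height=600)
+ ## plot.new()
+ ## par(fig=c(0, 0.5, 0.5, 1), new=TRUE); plot(rnorm(10), main="top-left panel")
+ ## par(fig=c(0.5, 1, 0.5, 1), new=TRUE); plot(rnorm(10), main="top-right panel")
+ ## par(fig=c(0, 1, 0, 0.5), new=TRUE); plot(rnorm(10), main="bottom panel")
+ ## dev.off()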
if(composition == "edpr"){ + # adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)+1) # blue--white--red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)+1) # blue--white--red colors + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + y1 <- 0.10 + y3 <- 0.315 + y5 <- 0.53 + y7 <- 0.745 + y.width <- 0.18 + + y2 <- y1 + y.width; y4 <- y3 + y.width; y6 <- y5 + y.width; y8 <- y7 + y.width + yt1 <- y2+0.003; yt3 <- y4+0.003; yt5 <- y6+0.003; yt7 <- y8+0.003 + yt2 <- yt1 + 0.004; yt4 <- yt3 + 0.005; yt6 <- yt5 + 0.005; yt8 <- yt7 + 0.005 + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + #par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + #if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + #par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + #ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + #if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + #par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + #ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + #if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.055, 0.085), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") 
{mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, title1.xpos + title1.width, yt7+0.0025, yt7+0.0075), new=TRUE) + mtext(paste0(regime.title[1]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt5+0.0025, yt5+0.0075), new=TRUE) + mtext(paste0(regime.title[2]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt3+0.0025, yt3+0.0075), new=TRUE) + mtext(paste0(regime.title[3]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt1+0.0025, yt1+0.0075), new=TRUE) + mtext(paste0(regime.title[4]," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, y7, y8), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y5, y6), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y3, y4), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y1, y2), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + #par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + #ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + #mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + #par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + #ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, 
cex=legend2.cex, subset=my.subset2) + #mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + #par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + #ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + #mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.055, 0.085), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y7, y8-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y5, y6-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y3, y4), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y1, y2-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " Frequency"), font=2, cex=4) + + ## % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y4+0.425, y4+0.425+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.39, y3+0.39+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y2+0.21, y2+0.21+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.17, y1+0.17+0.01), new=TRUE) + mtext("%", cex=3.3) + + ## mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y7+0.163, y7+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y5+0.163, y5+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.165, y3+0.165+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.163, y1+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + ## Subtitle frequency maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y7-0.011, y7-0.011+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y5-0.011, y5-0.011+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y3-0.011, y3-0.011+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y1-0.010, y1-0.010+0.001), new=TRUE) + mtext("year", cex=3) + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -m 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + system(paste0("rm ", fileoutput)) + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F,
cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean" || composition == "psl.rean.unordered"){ + ## correlation matrix with the DJF regimes: + #cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean + #cluster1.name.monthly <- cluster1.name; cluster2.name.monthly <- cluster2.name; cluster3.name.monthly <- cluster3.name; cluster4.name.monthly <- cluster4.name + assign(paste0("clusterMax", which(orden == cluster1.name), ".monthly"), pslwr1mean) + assign(paste0("clusterMax", which(orden == cluster2.name), ".monthly"), pslwr2mean) + assign(paste0("clusterMax", which(orden == cluster3.name), ".monthly"), pslwr3mean) + assign(paste0("clusterMax", which(orden == cluster4.name), ".monthly"), pslwr4mean) + + ## cluster.name.monthly <- c(cluster1.name.monthly, cluster2.name.monthly, cluster3.name.monthly, cluster4.name.monthly) + ## max1 <- which(cluster.name.monthly == orden[1]) # get the monthly cluster with the highest explained variance (by default it is associated with NAO+) + ## max2 <- which(cluster.name.monthly == orden[2]) # get the monthly cluster with the second highest variance + ## max3 <- which(cluster.name.monthly == orden[3]) # ... + ## max4 <- which(cluster.name.monthly == orden[4]) # ...
+ + ## max.seq <- c(max1, max2, max3, max4) + + ## Load DJF data: + rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" + + load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # Load mean slp DJF data from the same reanalysis + load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also reanalysis DJF regime names + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script + if(p.orig != p) p <- p.orig + + cluster1 <- which(orden == cluster1.name) # clusters for DJF!!! + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("psl.ordered",cluster1), pslwr1mean) # psl for DJF + assign(paste0("psl.ordered",cluster2), pslwr2mean) + assign(paste0("psl.ordered",cluster3), pslwr3mean) + assign(paste0("psl.ordered",cluster4), pslwr4mean) + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden)) + cluster.corr[1,1] <- cor(as.vector(clusterMax1.monthly), as.vector(psl.ordered1)) + cluster.corr[1,2] <- cor(as.vector(clusterMax1.monthly), as.vector(psl.ordered2)) + cluster.corr[1,3] <- cor(as.vector(clusterMax1.monthly), as.vector(psl.ordered3)) + cluster.corr[1,4] <- cor(as.vector(clusterMax1.monthly), as.vector(psl.ordered4)) + cluster.corr[2,1] <- cor(as.vector(clusterMax2.monthly), as.vector(psl.ordered1)) + cluster.corr[2,2] <- cor(as.vector(clusterMax2.monthly), as.vector(psl.ordered2)) + cluster.corr[2,3] <- cor(as.vector(clusterMax2.monthly), as.vector(psl.ordered3)) + cluster.corr[2,4] <- cor(as.vector(clusterMax2.monthly), as.vector(psl.ordered4)) + cluster.corr[3,1] <- cor(as.vector(clusterMax3.monthly), as.vector(psl.ordered1)) + cluster.corr[3,2] <- cor(as.vector(clusterMax3.monthly), as.vector(psl.ordered2)) + cluster.corr[3,3] <- cor(as.vector(clusterMax3.monthly), as.vector(psl.ordered3)) + cluster.corr[3,4] <- cor(as.vector(clusterMax3.monthly), as.vector(psl.ordered4)) + cluster.corr[4,1] <- cor(as.vector(clusterMax4.monthly), as.vector(psl.ordered1)) + cluster.corr[4,2] <- cor(as.vector(clusterMax4.monthly), as.vector(psl.ordered2)) + cluster.corr[4,3] <- cor(as.vector(clusterMax4.monthly), as.vector(psl.ordered3)) + cluster.corr[4,4] <- cor(as.vector(clusterMax4.monthly), as.vector(psl.ordered4)) + + # Centroid maps: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.08 + map.xpos <- map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + print(paste0("map.xpos= ", map.xpos)) + + par(fig = c(0, 1, 0, 1), new=TRUE) + # reset par to its default values, because drawing with PlotEquiMap() alters some par values: + if(n.map == 1) { op <- par(no.readonly = TRUE) } else { par(op) } + + text.cex <- 2 + text.ypos <- 1.03 + text.xmod <- 0.007 * (n.map - 1) + text.xpos <- map.xpos + text.xmod - 0.02 + text.width <- 0.015 + text(x=text.xpos - text.width, y=text.ypos - 0.02, labels="cl1", cex=text.cex) + text(x=text.xpos - text.width, y=text.ypos - 0.04, labels="cl2", cex=text.cex) + text(x=text.xpos - text.width, y=text.ypos - 0.06, labels="cl3", cex=text.cex) + text(x=text.xpos - text.width, y=text.ypos - 
0.08, labels="cl4", cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos + 0.00, labels="NAO+", cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos + 0.00, labels="NAO-", cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos + 0.00, labels="BLO", cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos + 0.00, labels="ATL", cex=text.cex) + + text(x=text.xpos + 0*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,1], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,2], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,3], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,4], cex=text.cex) + + ## Centroid maps: + ## (note that mapX == clusterMaxX.monthly, X = 1, ..., 4 by default) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, 
contours.lty="F1FF1F") + + } # close if on psl.rean or on psl.rean.unordered + + + # Visualize the composition if the impact maps for a selected forecasted month: + if(composition == "impact"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 
0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3) + par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE) + mtext("m/s",cex=3) + + dev.off() + + # format it manually from the bash shell (you must run it from your workstation until the identity command of ImageMagick will be loaded un the cluster): + #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png + + + ### to plot the impact map of all regimes on a particular month: + #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100 + year.test <- 2016 + pos.year.test <- year.test - year.start +1 + imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test] + #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032) + par(fig=c(0, 1, 0.05, 1), new=TRUE) + PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + # vector with the frequency of the WRs in the chosen month and year: + wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test]) + + ## # or save them as individual maps: + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700) + 
## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + + ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700) + ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + ## dev.off() + } + + # Visualize the individual regime patterns for a selected forecasted month, one png per cluster: + if(composition == "single.psl"){ + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Frequency plots: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + 
if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean 
time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4, cor.imp1, cor.imp2, cor.imp3, cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple') && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(orden[1]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(orden[2]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3855), new=TRUE) + mtext(paste0(orden[3]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(orden[4]), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex,
label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + dev.off() + + } # close if on composition + + + if(composition == "psl.rean" || composition == "psl.rean.unordered") dev.off() + + print("Finished!") +} # close if on composition != "summary" + + + +#} # close for on forecasts.month + + + +if(composition == "taylor"){ + library("plotrix") + + fields.name="ERA-Interim" + + # choose a reference season from 13 (Winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the Blocking and Atlantic regimes were saved swapped!
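+ # (together with the three assignments below, this hard-codes the correct regime order by hand)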
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e. with running clustering) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + rean2.name <- "ERA-Interim" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the Blocking and Atlantic regimes were saved swapped!
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regime: + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load pslwrXmean data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat,
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load pslwrXmean data + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1),
pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load pslwrXmean data + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" # "black" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod <- TRUE + #add.mod <- ifelse(p == 1, FALSE, TRUE) + + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) {
corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # arrays storing the summary statistics in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original value of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original value of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) #
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
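# transSimX[k] and transObsX[k] hold the simulated and observed transition frequencies from regime X to regime k; + # the diagonal (X-to-X) entries are left NA so that self-transitions are not plotted +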
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+ + array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking + array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge + + array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+ + array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO- + array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge + + array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+ + array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO- + array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking + array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Spatial Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7] + array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6] + array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5] + array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4] + array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3] + array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2] + array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
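# in these target-month panels, i2 converts (target month i, x position j) back to the startdate index: + # the x axis is labelled 6:0, so j=0 is lead 6; e.g. i=1 (Jan) with j=0 gives i2 = 1-6+12 = 7 (the July + # startdate), and the column index 1+6-j = 7 picks its lead-6 value +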
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+ dev.off()
+
+
+ # Bias freq. summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400)
+
+ # create an array similar to array.diff.freq but with colors instead of frequencies:
+ diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4))
+ array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7]
+ array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6]
+ array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5]
+ array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4]
+ array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3]
+ array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2]
+ array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("Diff. between S4 and ERA-Interim regime frequency (%)", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime frequency (%)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("Diff. between S4 and ERA-Interim regime frequency (%)", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1,
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
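+ # The per-regime panels of these startdate figures all repeat the same
+ # plot/axis/polygon sequence; a minimal refactoring sketch (not used by the
+ # script as written, the function name is illustrative) would draw one panel
+ # from a single colour slice of the 12x7x4 array:
+ draw.startdate.panel <- function(cols.slice, regime.title, fig.title, fig.panel){
+   par(mar=c(0,0,4,0), fig=fig.title, new=TRUE)
+   mtext(regime.title, cex=1.5)
+   par(mar=c(0,4,1,0), fig=fig.panel, new=TRUE)
+   plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+   mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+   mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+   axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+   axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+   for(p in 1:12){ for(l in 0:6){ polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5), col=cols.slice[p,1+l])}}
+ }
+ # e.g.: draw.startdate.panel(array.imp.colors[,,2], "Atlantic Ridge", c(0.45,0.9,0.42,0.5), c(0.45,0.9,0.12,0.42))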
par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Simulated transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO+ regime (1994-2013)", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l,
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
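+ # the x axis of these target-month panels runs with lead time decreasing from
+ # left to right (labels=6:0), so column j corresponds to lead time 6-j; the
+ # index i2 = ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) in the loops below
+ # recovers the startdate month whose forecast at that lead time falls in
+ # target month i, wrapping around the year (an equivalent modular form, as a
+ # sketch, would be i2 <- ((i-(6-j)-1) %% 12) + 1).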
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
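+ # the stepwise "< my.seq[k]" assignments used to build these colour arrays
+ # implement a simple binning of values into the colour scale; a compact
+ # equivalent (a sketch only, assuming the same breaks and the same explicit
+ # NA handling as above) would be:
+ #   cols.binned <- array(trans.cols[findInterval(array.trans.obs1, my.seq[2:8]) + 1],
+ #                        dim=dim(array.trans.obs1))
+ #   cols.binned[is.na(array.trans.obs1)] <- "white"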
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
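+ # The eight "<" assignments repeated throughout this section all perform the same
+ # binning: a value in [my.seq[k], my.seq[k+1]) gets trans.cols[k] for k = 1..7,
+ # anything at or above my.seq[8] keeps trans.cols[8], and NAs are painted white.
+ # A minimal equivalent sketch (hypothetical helper, not called in this script):
+ prob2col <- function(x, brks = my.seq, cols = trans.cols) {
+   # findInterval() returns k when brks[k] <= x < brks[k+1]; clamp out-of-range
+   # values into the two end classes, as the cascades implicitly do
+   k <- pmin(pmax(findInterval(x, brks), 1), length(cols))
+   out <- array(cols[k], dim = dim(x))
+   out[is.na(x)] <- "white"   # NA cells are drawn in white, as above
+   out
+ }
+ # e.g. prob2col(array.trans.obs2) would reproduce array.trans.obs2.colors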
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
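+ # Reminder: the 4th dimension of the array.trans.*.colors arrays indexes the
+ # destination regime, with 1 = NAO+, 2 = Atlantic Ridge, 3 = NAO- and 4 = Blocking;
+ # the single-panel summary figures draw them as the left/bottom/right/top triangle
+ # of each cell, and the multi-panel figures devote one panel to each.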
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ +
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
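+ # ColorBar2() is a helper defined elsewhere in this repository (it is not part of
+ # s2dverification); with 8 colour cells, my.ticks = -0.5 + 1:length(my.seq)
+ # apparently yields the 9 cell edges, so each value of my.seq labels a class
+ # boundary rather than a cell centre.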
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]),
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + + + + + + + + + + + + + +if(monthly_anomalies) { + # Compute climatology and anomalies (with LOESS filter) for sfcWind data, and draw monthly anomalies, if you want to compare the mean daily wind speed anomalies reconstructed by the weather regimes classification with those observed by the reanalysis: + + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March to December period, removing the 29th of February + } + + rm(sfcWind366) + gc() + + # LOESS anomalies: + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + +
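+ # Editor's note: the climatology + LOESS + anomaly steps above are repeated verbatim
+ # for psl below; under the same assumptions (an array ordered as [year, day-of-year
+ # (365), lat, lon]) they could be factored into a small helper. The function below is
+ # a hypothetical sketch using only base R (replicate/aperm instead of s2dverification's
+ # InsertDim); it is not called anywhere in this script:
+ daily_loess_anom <- function(x, span=0.35) {
+   clim <- apply(x, c(2,3,4), mean, na.rm=TRUE) # raw daily climatology [365, lat, lon]
+   for(i in seq_len(dim(clim)[2])) {
+     for(j in seq_len(dim(clim)[3])) {
+       df <- data.frame(val=clim[,i,j], day=1:365)
+       clim[,i,j] <- predict(loess(val ~ day, df, span=span)) # smooth the annual cycle
+     }
+   }
+   x - aperm(replicate(dim(x)[1], clim), c(4,1,2,3)) # anomalies = data minus smoothed climatology
+ }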
rm(sfcWindClimDaily) + gc() + + sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), 
bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)]) + + ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("m/s", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + # you could almost use the normal PlotEquiMap function below; it would only need to set the black line for anomaly=0 and to decrease the size of the contour labels: + # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5) + + ##my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.37, 1, 0, 0.09), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + #mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days
in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load regime names: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + ## add strip with daily sequence of WRs: + + mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name)) + mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name)) + mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name)) + mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name)) + + cluster1.name.short <- substr(cluster1.name,1,1) + cluster2.name.short <- substr(cluster2.name,1,1) + cluster3.name.short <- substr(cluster3.name,1,1) + cluster4.name.short <- substr(cluster4.name,1,1) + + ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime: + if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1) + if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2) + if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3) + if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4) + + c1 <- which(cluster.test == 1) + c2 <- which(cluster.test == 2) + c3 <- which(cluster.test == 3) + c4 <- which(cluster.test == 4) + + cluster.test.letters <- cluster.test + cluster.test.letters[c1] <- cluster1.name.short + cluster.test.letters[c2] <- cluster2.name.short + cluster.test.letters[c3] <- cluster3.name.short + cluster.test.letters[c4] <- cluster4.name.short + + cluster.col <- cluster.test.letters + cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1" + cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1" + cluster.col[which(cluster.test.letters == "B")] <- "White" + cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1" + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("fre.days",cluster1), fre1.days) + assign(paste0("fre.days",cluster2), fre2.days) + assign(paste0("fre.days",cluster3), fre3.days) + assign(paste0("fre.days",cluster4), fre4.days) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test 
== 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) + + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + # save strip with the daily regime series for chosen month and year: + fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png") + png(filename=fileoutput.seq,width=1500,height=1850) + + plot.new() + + sep <- 0.03 + for(day in 1: n.days.in.a.period(p, 2001)){ + sep.cum <- (day-1)*sep + polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day]) + text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5) + text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=cluster.test.letters[day], cex=2) + + } + + dev.off() + + + + # save average impact and sea level pressure only for chosen month and year: + fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1500,height=2000) + + plot.new() + + par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE) + PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE) + PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE) + PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE) + PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + par(fig=c(0,0.33,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, 
bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage 
",fileoutput.temp)) # cut the lower part of the strip + system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both)) + + + ## same image formatted for the catalogue: + fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test)) + + system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput)) + + + } # close for on year.test + } # close for on month.test + + +} # close for on monthly_anomalies + + + + + + + + + + diff --git a/old/weather_regimes_maps_v28.R~ b/old/weather_regimes_maps_v28.R~ new file mode 100644 index 0000000000000000000000000000000000000000..585316c6757a869f3b2a5a191775990440b5ffd2 --- /dev/null +++ b/old/weather_regimes_maps_v28.R~ @@ -0,0 +1,5807 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen cliamte variable (i.e: wind speed or temperature) +# +# I/O: input file needed are: +# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData". +# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end wuth the suffix "_sfcWind.RData" and "_tas.RData" +# +# Output files are the maps, witch the user can generate as .png or as .pdf +# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# Note that this script doesn't need to be parallelized because it takes only a few seconds to create and save the output maps. +# +# Assumptions: You need to have created before the '_psl.RData' files which are the output of 'weather_regimes'.R, for each period you want to visualize. +# If your regimes derive from a reanalysis, this script must be run twice: +# first, only one month/season at time with: 'period=X', (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none" +# And inserting the regime names 'cluster.name1=...' in the correct order; in this first run, you only save the ordered cartography. +# You already have to know which is the right regime order by taking a look at the output maps (_clusterX.png) of weather_regimes.R +# After that, any time you run this script, remember to set: +# 'ordering = TRUE , 'save.names = FALSE' (and any type of composition you want to plot) not to overwrite the files which stores the right cluster order. +# You can check if the monthly regimes jave been associated correctly setting composition <- "psl.rean" +# +# For example, you can find that the sequence of the images in the file (from top to down) corresponds to: +# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering' +# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file. +# Then, you have to run this script only to get the maps in the right order, which by default is, from top to down: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). 
You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically assign to each simulated cluster the observed +# regime with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test +#library("corrplot") + +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions + +### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files): + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/57_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_ordered4variance" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/58_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days_ordered4variance" + +rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis) + +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name #forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +composition <- "psl.rean" # choose which kind of composition you want to plot: + # 'none' doesn't plot anything, it only associates the clusters to the regimes with the manual association in the rows below + # and saves them in the ClusterName.Rdata files for each period selected, overwriting any pre-existing files.
+ # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns + # 'psl' for all the regime anomalies for a fixed forecast month + # 'fre' for all the interannual frequencies for a fixed forecast month + # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num + # 'summary' for the summary plots of all forecast months (persistence bias, freq.bias, correlations, etc.) [You need all ClusterNames files] + # 'impact.highest' for the impact plot of the regime with the highest impact + # 'single.impact' to save the four impact maps in a composition 2x2 + # 'single.psl' to save the individual psl map + # 'single.fre' to save the individual fre map + # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one + # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps + # 'psl.rean': plot all the regime anomalies and correlation matrix of all months of a reanalysis with DJF regime anomalies of the same reanalysis + # 'psl.rean.unordered': as before, but without ordering the regimes with the same order in vector 'orden' + # 'variance' : as 'none', but instead of associating clusters to regimes, it reorders the clusters in decreasing order of explained variance + +var.num <- 1 # if fields.name ='rean.name' and composition ='simple' or 'edpr', choose a variable for the impact maps (1: sfcWind, 2: tas) + +mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot + +# Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). +period <- 1:12 + +# Manually associate the four clusters to the four regimes, one period at a time (only in case composition="none"): +cluster4.name <- "NAO+" +cluster1.name <- "NAO-" +cluster2.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +####### if monthly_anomalies <- TRUE, you have to specify these additional parameters: +monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies for the chosen years and months set below over Europe +year.chosen <- 2016 +month.chosen <- 1:12 +no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4" +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +rean.data <- JRA55 # choose one of the above reanalyses + +####### if fields.name='forecast.name', you have to specify these additional variables: + +# working dir with the input files with '_psl.RData' suffix: +work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +forecast.month <- 1 # in case composition = 'psl' or 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition + # if you want to plot all
compositions from 1 to 12 at once, just enable the line below and add a closing } at the end of the script + # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!! +#for(forecast.month in 1:12){ + +####### Derived variables ####################################################################################################################################### + +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only one .pdf file with all seasons/months + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") + +var.name <- c("sfcWind","tas") +var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 + +############################################################################################ +## Start analysis ## +############################################################################################ + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ...
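+ # Editor's note: the startdate / lead-time bookkeeping used below reduces to modular
+ # arithmetic on months 1..12: a forecast that targets 'forecast.month' with lead time
+ # 'lead.month' starts in month forecast.month - lead.month, wrapped into 1..12 (this is
+ # exactly the "period <- forecast.month - lead.month" correction a few lines below).
+ # A minimal illustration (hypothetical helper, not used by the script):
+ start_month_for <- function(forecast.month, lead.month) {
+   m <- forecast.month - lead.month
+   if(m < 1) m <- m + 12
+   m
+ }
+ # e.g. start_month_for(1, 6) returns 7: the January forecast at 6 months lead starts in July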
+ + if(composition == "none") { + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + } + + if(composition == "variance"){ + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + period <- 1:12 + } + + if(composition == "psl.rean") { + ordering <- TRUE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering <- FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + ## if(composition == "corr.matrix") { + ## ordering <- FALSE # set it to TRUE if you want to see the correlation matrix of the ordered clusters instead!!! + ## period <- c(9:12, 1:8) # to start from September instead of January + ## png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + ## plot.new() + ## } + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple" || composition == 'edpr'){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month') + impact.data <- FALSE + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + # load impact data only if it is available, if not it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print(paste0("Impact data for variable ",var.name[var.num] ," available for reanalysis ", rean.name)) + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print(paste0("Impact data for variable ",var.name[var.num] ," not available for reanalysis ", rean.name)) + } + + if(composition == "variance"){ + my.cluster2 <- my.cluster # create a copy of my.cluster + + ss1 <- which(my.cluster$cluster == 1) + ss2 <- which(my.cluster$cluster == 2) + ss3 <- which(my.cluster$cluster == 3) + ss4 <- which(my.cluster$cluster == 4) + + 
+          withinss <- my.cluster$withinss
+          max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # first cluster with maximum variance
+          withinss[max1] <- NA
+
+          max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # second cluster with maximum variance
+          withinss[max2] <- NA
+
+          max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # third cluster with maximum variance
+          withinss[max3] <- NA
+
+          max4 <- which(!is.na(withinss))
+          rm(withinss)
+
+          # vector whose first element is the cluster with the maximum variance, whose second element is the cluster with the
+          # second maximum variance, and so on:
+          max.seq <- c(max1, max2, max3, max4)
+
+          assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+)
+          assign(paste0("cluster",max2,".name"), orden[2])
+          assign(paste0("cluster",max3,".name"), orden[3])
+          assign(paste0("cluster",max4,".name"), orden[4])
+
+        }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+        load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
+        if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+          impact.data <- TRUE
+          print("Impact data available for forecasts")
+          if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps
+        } else {
+          impact.data <- FALSE
+          print("Impact data for forecasts not available")
+        }
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+        # compute the simulated interannual monthly variability:
+        n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
+        freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+        freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+        freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+        freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+        sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+        sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+        sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+        sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
+        # compute the transition probabilities:
+        j1 <- j2 <- j3 <- j4 <- 1
+        transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+        n.members <- dim(my.cluster.array[[p]])[3]
+
+        for(m in 1:n.members){
+          for(y in 1:n.years){
+            for (d in 1:(n.days.month-1)){
+
+              if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){
+                transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+                j1 <- j1 + 1
+              }
+              if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){
+                transition2[j2] <-
my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... , day1month2member1, day2month2member1, ,,, + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast 
<- lon
+
+        if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+        load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+        # load reanalysis varwrXmean impact data:
+        if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig,period)) period <- period.orig
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+        if(p.orig != p) p <- p.orig
+
+        # compute the observed transition probabilities:
+        n.days.month <- n.days.in.a.period(load.month,2001)
+        j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+        for (d in 1:(length(my.cluster$cluster)-1)){
+          if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+            j1o <- j1o + 1
+          }
+          if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+            j2o <- j2o + 1
+          }
+          if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+            j3o <- j3o + 1
+          }
+          if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+            transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+            j4o <- j4o + 1
+          }
+
+        }
+
+        trans.obs <- matrix(NA,4,4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+        trans.obs[1,2] <- length(which(transition.obs1 == 2))
+        trans.obs[1,3] <- length(which(transition.obs1 == 3))
+        trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+        trans.obs[2,1] <- length(which(transition.obs2 == 1))
+        trans.obs[2,3] <- length(which(transition.obs2 == 3))
+        trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+        trans.obs[3,1] <- length(which(transition.obs3 == 1))
+        trans.obs[3,2] <- length(which(transition.obs3 == 2))
+        trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+        trans.obs[4,1] <- length(which(transition.obs4 == 1))
+        trans.obs[4,2] <- length(which(transition.obs4 == 2))
+        trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+        trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+        for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+        # compute the observed interannual monthly variability:
+        freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+        for(y in year.start:year.end){
+          # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+          freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+          freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+          freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+          freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+        }
+
+        sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+        sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+        sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+        sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+        wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+        regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+        if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+        if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+        if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+        if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+        cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+        cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+        cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+        cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+        cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+        cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+        cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+        cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+        cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+        cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+        cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+        cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+        cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+        cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+        cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+        cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+        cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+        # associate each simulated cluster to one observed regime (greedy matching: repeatedly pick the highest remaining correlation and exclude its row and column):
+        max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+        cluster.corr2 <- cluster.corr
+        cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+        cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+        max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+        cluster.corr3 <- cluster.corr2
+        cluster.corr3[max2[1],] <- -999
+        cluster.corr3[,max2[2]] <- -999
+        max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+        cluster.corr4 <- cluster.corr3
+        cluster.corr4[max3[1],] <- -999
+        cluster.corr4[,max3[2]] <- -999
+        max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+        seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+        seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+        cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+        cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+        cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+        cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+        cluster1.name <- regimes.obs[cluster1.regime]
+        cluster2.name <- regimes.obs[cluster2.regime]
+        cluster3.name <- regimes.obs[cluster3.regime]
+        cluster4.name <- regimes.obs[cluster4.regime]
+
+        regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+        cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs))
+
+        spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
+        spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+        spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+        spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+        # measure persistence for the reanalysis dataset:
+        my.cluster.vector <- my.cluster[[1]]
+
+        sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+        i1 <- i2 <- i3 <- i4 <- 1
+
+        if(my.cluster.vector[1] != 1) i1 <- 0
+        if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[1] != 2) i2 <- 0
+        if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[1] != 3) i3 <- 0
+        if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[1] != 4) i4 <- 0
+        if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        n.dd <- length(my.cluster.vector)
+        for(d in 1:(n.dd-1)){
+          if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+          if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+          if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+          if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+          if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+          if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+          if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+          if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+        }
+
+        if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+        if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+        if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+        if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+        persObs1 <- mean(sequ1[which(sequ1 != 0)])
+        persObs2 <- mean(sequ2[which(sequ2 != 0)])
+        persObs3 <- mean(sequ3[which(sequ3 != 0)])
+        persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+        ### insert here all the manual corrections to the regime assignment due to misassignments in the automatic selection:
+        ### forecast month: September
+        #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+        #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+        #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+        #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+        if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+        if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+        if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+        if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+        if(p ==
4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" } + if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+" + + # regimes.sim # for the debug + + ### forecast month: October + #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"} + #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + + if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"} + if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, 
cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp)
+          #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv)
+
+          clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean)
+          #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4)
+
+          cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]]))
+          cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]]))
+          cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]]))
+          cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]]))
+
+        }
+
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload the forecast cluster time series and mean psl data
+        load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload the mean var data for the impact maps
+
+        if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+        if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        if(!identical(period.orig, period)) period <- period.orig
+        if(p.orig != p) p <- p.orig
+        if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order
+        if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+      } # close if on fields.name == forecast.name && n.map < 7
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+        if(save.names){
+          save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+        } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+          ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")
+          if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check whether the file exists
+          load(ClusterName.file) # load the cluster names
+
+          if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+          if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+        }
+      }
+
+      # assign to each cluster its regime:
+      #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+      if(ordering == FALSE){
+        cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc.
+ } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + 
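+
+      # Note on the reordering below: trans.obs rows/columns are indexed by cluster number,
+      # while the plots are ordered by regime, so 'ordered.sequence.obs' converts between
+      # the two. Toy illustration (values not from the data): if clusters 1..4 occupy plot
+      # positions c(3,1,4,2), then trans.obs[1, ordered.sequence.obs] lists the transition
+      # probabilities of cluster 1 in plotting order.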
+ assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA 
<- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## 
#lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(orden[1],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(orden[2],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(orden[3],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(orden[4],": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, 
cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(orden[4], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) 
barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(orden[4], " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + 
if(composition == "edpr"){ + # adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)+1) # blue--white--red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)+1) # blue--white--red colors + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols[2:(length(my.cols)-1)], sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else 
{mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(orden[1]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(orden[2]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(orden[3]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(orden[4]," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, 
title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(orden[4], " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(orden[1], " Frequency"), font=2, cex=4) # paste0 doesn't work 
inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(orden[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(orden[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(orden[4], " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.91, 0.92), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.68, 0.69), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.21, 0.22), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), 
brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else {
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'psl'
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered"){
+ ## Matrix of correlations with the DJF regimes:
+ #cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean
+ #cluster1.name.monthly <- cluster1.name; cluster2.name.monthly <- cluster2.name; cluster3.name.monthly <- cluster3.name; cluster4.name.monthly <- cluster4.name
+ assign(paste0("clusterMax", which(orden == cluster1.name), ".monthly"), pslwr1mean)
+ assign(paste0("clusterMax", which(orden == cluster2.name), ".monthly"), pslwr2mean)
+ assign(paste0("clusterMax", which(orden == cluster3.name), ".monthly"), pslwr3mean)
+ assign(paste0("clusterMax", which(orden == cluster4.name), ".monthly"), pslwr4mean)
+
+ ## cluster.name.monthly <- c(cluster1.name.monthly, cluster2.name.monthly, cluster3.name.monthly, cluster4.name.monthly)
+ ## max1 <- which(cluster.name.monthly == orden[1]) # find the monthly cluster with the highest explained variance (by default it is associated with NAO+)
+ ## max2 <- which(cluster.name.monthly == orden[2]) # find the monthly cluster with the second highest variance
+ ## max3 <- which(cluster.name.monthly == orden[3]) # ...
+ ## max4 <- which(cluster.name.monthly == orden[4]) # ...
+
+ ## max.seq <- c(max1, max2, max3, max4)
+
+ ## Load DJF data:
+ rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+
+ load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # load the mean DJF slp data from the same reanalysis
+ load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # also load the reanalysis DJF regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
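+ # [Editor's sketch] The assign()/which() idiom used just below maps each stored
+ # centroid (pslwr1mean..pslwr4mean, ranked by explained variance) onto the fixed
+ # regime order of the 'orden' vector. A toy example of the same idiom, with
+ # hypothetical names that are not part of the pipeline:
+ #   orden.toy <- c("NAO+", "NAO-", "Blocking", "Atl.Ridge")  # target order
+ #   names.toy <- c("Blocking", "NAO+", "Atl.Ridge", "NAO-")  # order as saved on disk
+ #   for(k in 1:4) assign(paste0("centroid", which(orden.toy == names.toy[k])), k)
+ #   # now centroid1 == 2 (NAO+ was saved second) and centroid3 == 1 (Blocking was saved first)
+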
+ cluster1 <- which(orden == cluster1.name) # position of each DJF cluster in the 'orden' sequence
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("psl.ordered",cluster1), pslwr1mean) # psl for DJF
+ assign(paste0("psl.ordered",cluster2), pslwr2mean)
+ assign(paste0("psl.ordered",cluster3), pslwr3mean)
+ assign(paste0("psl.ordered",cluster4), pslwr4mean)
+
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden))
+ for(i in 1:4) for(j in 1:4) cluster.corr[i,j] <- cor(as.vector(get(paste0("clusterMax",i,".monthly"))), as.vector(get(paste0("psl.ordered",j))))
+
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.08
+ map.xpos <- map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+ print(paste0("map.xpos= ", map.xpos))
+
+ par(fig=c(0, 1, 0, 1), new=TRUE) # fixed: 'par(fig <- c(...))' assigned a global variable instead of setting the parameter
+ # reset par to its default values, because drawing with PlotEquiMap() alters some par values:
+ if(n.map == 1) { op <- par(no.readonly = TRUE) } else { par(op) }
+
+ text.cex <- 2
+ text.ypos <- 1.03
+ text.xmod <- 0.007 * (n.map - 1)
+ text.xpos <- map.xpos + text.xmod - 0.02
+ text.width <- 0.015
+ # row labels (monthly clusters), column labels (DJF regimes) and the 4x4 correlation values:
+ for(i in 1:4) text(x=text.xpos - text.width, y=text.ypos - 0.02*i, labels=paste0("cl",i), cex=text.cex)
+ regime.labels <- c("NAO+","NAO-","BLO","ATL")
+ for(j in 1:4) text(x=text.xpos + (j-1)*text.width, y=text.ypos, labels=regime.labels[j], cex=text.cex)
+ for(i in 1:4) for(j in 1:4) text(x=text.xpos + (j-1)*text.width, y=text.ypos - 0.02*i, labels=round(cluster.corr,2)[i,j], cex=text.cex)
+
+ ## Centroid maps:
+ ## (note that mapX == clusterMaxX.monthly, X = 1, ..., 4 by default)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ } # close if on psl.rean or on psl.rean.unordered
+
+
+ # Visualize the composition of the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10,
intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5) + + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'impact' + + # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month: + if(composition == "single.impact"){ + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000) + + plot.new() + + par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE) + mtext("NAO+",cex=5) + par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE) + mtext("NAO-",cex=5) + par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE) + PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE) + mtext("Blocking",cex=5) + par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5) + + par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE) + mtext("Atlantic Ridge",cex=5) + par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE) + PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = 
FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE)
+ ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3)
+ par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE)
+ mtext("m/s",cex=3)
+
+ dev.off()
+
+ # format it manually from the bash shell (run it from your workstation until ImageMagick's 'identity' command is available on the cluster):
+ #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png
+
+
+ ### to plot the combined impact map of all regimes for a particular month and year, weighting each regime impact by that year's frequency:
+ #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100
+ year.test <- 2016
+ pos.year.test <- year.test - year.start + 1
+ imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test]
+ #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032)
+ par(fig=c(0, 1, 0.05, 1), new=TRUE)
+ PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ # vector with the frequency of the WRs in the chosen month and year:
+ wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test])
+
+ ## # or save them as individual maps:
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+ }
+
+ # Save the psl pattern of each cluster as an individual map for the selected month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat,filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2))) + dev.off() + + png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2))) + dev.off() + + # Legend centroid maps: + #my.subset <- match(values.to.plot, my.brks) + #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + #mtext(side=4," hPa", cex=legend1.cex) + + } + + # Visualize the composition with the interannual frequencies for a selected forecasted month: + if(composition == "fre"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 
100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 
'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple' || composition == 'edpr' ) && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(orden[1]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(orden[2]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(orden[3]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(orden[4]), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, 
cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + dev.off() + + } # close if on composition + + + if(composition == "psl.rean" || composition == "psl.rean.unordered") dev.off() + + print("Finished!") +} # close if on composition != "summary" + + + +#} # close for on forecasts.month + + + +if(composition == "taylor"){ + library("plotrix") + + fields.name="ERA-Interim" + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
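+ # [Editor's note] This hard-coded override is fragile: it silently breaks if the
+ # .RData files are ever regenerated with a different cluster order. A more robust,
+ # purely hypothetical alternative would be to match each stored centroid to a
+ # reference pattern by spatial correlation (assuming 'ref.patterns' is a list of
+ # the four reference centroids, in the order of 'orden'):
+ #   best.match <- function(centroid, ref.patterns) {
+ #     which.max(sapply(ref.patterns, function(r) cor(as.vector(centroid), as.vector(r))))
+ #   }
+ #   cluster1.ref <- best.match(pslwr1mean, ref.patterns)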
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + rean2.name <- "ERA-Interim" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regimes: + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, 
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, that with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), 
pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" # "black" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + #add.mod <- ifelse(p == 1, FALSE, TRUE) + + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { 
corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # 
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
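+ # [Editor's aside] Each transSimX/transObsX vector holds, for regime X, the simulated
+ # or observed probabilities of moving to regimes 1-4 (its own position is unused).
+ # A minimal sketch of how such probabilities could be derived from a daily regime
+ # series coded 1..4 (hypothetical helper, not part of the original script):
+ #   trans.matrix <- function(series, n.reg=4){
+ #     tab <- table(factor(head(series,-1), levels=1:n.reg),
+ #                  factor(tail(series,-1), levels=1:n.reg))
+ #     prop.table(tab, margin=1)  # row r: transition probabilities from regime r (NaN if r never occurs)
+ #   }
+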
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.Ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.Ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.Ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.Ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.Ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.Ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.Ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ } # close 'l' on lead months
+ } # close 'p' on startdates
+
+
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.spat.cor but with colors instead of correlation values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ # descending thresholds, so cells with lower correlations are overwritten with cooler colors:
+ for(k in 7:1) array.spat.cor.colors[array.spat.cor < my.seq[k+1]] <- sp.corr.cols[k]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
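+ # Aside on the colour lookup used throughout these summaries: the descending chains of
+ # "colors[values < my.seq[k]] <- cols[k-1]" assignments (as for array.freq.colors above) are an
+ # unrolled binning over the break points. The same lookup can be written in one step with
+ # findInterval(); a minimal standalone sketch under the same break/colour conventions
+ # (bin.colors is a hypothetical helper, not used by the plotting code):
+ bin.colors <- function(x, inner.brks, cols) cols[findInterval(x, inner.brks) + 1]
+ stopifnot(bin.colors(0.6, seq(-0.75, 0.75, 0.25), paste0("c", 1:8)) == "c7")  # 0.5 <= 0.6 < 0.75 -> 7th colour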
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + +
# Bias freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequency differences: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1] + +
par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + +
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + +
png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
+  # St.Dev.freq ratio summary:
+  # create an array similar to array.cor but with colors instead of the ratio values:
+  diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2)
+  array.diff.sd.freq.colors <- values.to.colors(array.diff.sd.freq, my.seq, diff.sd.freq.cols)
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400)
+  plot.triangle.summary(array.diff.sd.freq.colors, "St.Dev. ratio between sim. and obs. average frequencies", my.seq, diff.sd.freq.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.diff.sd.freq.colors, "St.Dev. ratio between sim. and obs. average frequencies", my.seq, diff.sd.freq.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300)
+  plot.target.figure4(array.diff.sd.freq.colors, "St.Dev. ratio between sim. and obs. average frequencies", my.seq, diff.sd.freq.cols)
+  dev.off()
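+  # How the target-month panels are indexed: a cell at target month i and axis
+  # position j shows the startdate i2 = i - (6-j), wrapped into 1..12 when it
+  # falls in the previous year. The same wrap can be written with modular
+  # arithmetic, e.g. i2 <- (i - (6-j) - 1) %% 12 + 1; for instance target month
+  # i=2 (Feb) at a lead time of 6-j=3 months maps back to startdate i2=11 (Nov).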
+  # Impact summary:
+  imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+  array.imp.colors <- values.to.colors(array.imp, my.seq, imp.cols)
+  imp.title <- paste0("S4 spatial correlation of impact on ", var.name[var.num])  # variable name pasted into the heading text
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400)
+  plot.triangle.summary(array.imp.colors, imp.title, my.seq, imp.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.imp.colors, imp.title, my.seq, imp.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+  plot.target.figure4(array.imp.colors, imp.title, my.seq, imp.cols)
+  dev.off()
+
+  # Simulated transition probability from NAO+ to X summary;
+  # the same palette and breaks are shared by all the transition summaries below:
+  trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  my.seq <- c(0,10,20,30,40,50,60,70,100)
+  array.trans.sim1.colors <- values.to.colors(array.trans.sim1, my.seq, trans.cols, na.col="white")
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+  plot.triangle.summary(array.trans.sim1.colors, "% ECMWF-S4 transition from NAO+ regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.trans.sim1.colors, "% ECMWF-S4 transition from NAO+ regime", my.seq, trans.cols)
+  dev.off()
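+  # The remaining transition summaries (sim/obs x source regime) differ only in
+  # the input array and the labels; they could equally be driven from one loop,
+  # along these lines (a sketch under the current variable names, not executed):
+  #   trans.list <- list(list(arr=array.trans.obs1, from="NAO+", lab="% Observed transition from NAO+ regime", tag="NAO+_obs"),
+  #                      list(arr=array.trans.sim2, from="NAO-", lab="% ECMWF-S4 transition from NAO- regime", tag="NAO-"))
+  #   for(tr in trans.list){
+  #     cols.arr <- values.to.colors(tr$arr, my.seq, trans.cols, na.col="white")
+  #     png(paste0(work.dir,"/",forecast.name,"_summary_transition_prob_",tr$tag,"_target_month.png"), width=750, height=350)
+  #     plot.target.figure3(cols.arr, tr$lab, tr$from, my.seq, trans.cols)
+  #     dev.off()
+  #   }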
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350)
+  plot.target.figure3(array.trans.sim1.colors, "% ECMWF-S4 transition from NAO+ regime", "NAO+", my.seq, trans.cols)
+  dev.off()
+
+  # Observed transition probability from NAO+ to X summary:
+  array.trans.obs1.colors <- values.to.colors(array.trans.obs1, my.seq, trans.cols, na.col="white")
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400)
+  plot.triangle.summary(array.trans.obs1.colors, "% Observed transition from NAO+ regime", my.seq, trans.cols)
+  dev.off()
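+  # Every figure follows the same png() ... dev.off() pattern; a small wrapper
+  # like this one (a sketch, not used by the calls below) would keep graphics
+  # devices balanced even if the plotting code stops with an error halfway:
+  with.png <- function(file, width, height, expr){
+    png(filename=file, width=width, height=height)
+    on.exit(dev.off())
+    eval.parent(substitute(expr))
+  }
+  # e.g.: with.png(paste0(work.dir,"/test.png"), 550, 400, plot(1:10))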
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.trans.obs1.colors, "% Observed transition from NAO+ regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350)
+  plot.target.figure3(array.trans.obs1.colors, "% Observed transition from NAO+ regime", "NAO+", my.seq, trans.cols)
+  dev.off()
+
+  # Simulated transition probability from NAO- to X summary:
+  array.trans.sim2.colors <- values.to.colors(array.trans.sim2, my.seq, trans.cols, na.col="white")
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400)
+  plot.triangle.summary(array.trans.sim2.colors, "% ECMWF-S4 transition from NAO- regime", my.seq, trans.cols)
+  dev.off()
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.trans.sim2.colors, "% ECMWF-S4 transition from NAO- regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350)
+  plot.target.figure3(array.trans.sim2.colors, "% ECMWF-S4 transition from NAO- regime", "NAO-", my.seq, trans.cols)
+  dev.off()
+
+  # Observed transition probability from NAO- to X summary:
+  array.trans.obs2.colors <- values.to.colors(array.trans.obs2, my.seq, trans.cols, na.col="white")
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400)
+  plot.triangle.summary(array.trans.obs2.colors, "% Observed transition from NAO- regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.trans.obs2.colors, "% Observed transition from NAO- regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350)
+  plot.target.figure3(array.trans.obs2.colors, "% Observed transition from NAO- regime", "NAO-", my.seq, trans.cols)
+  dev.off()
+
+  # Simulated transition probability from blocking to X summary:
+  array.trans.sim3.colors <- values.to.colors(array.trans.sim3, my.seq, trans.cols, na.col="white")
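+  # In all the transition summaries, white cells are startdate/lead-time
+  # combinations whose transition probability is NA (presumably startdates and
+  # leads with no day classified in the source regime); a quick count of such
+  # empty cells, as a sketch: sum(is.na(array.trans.sim3))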
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, height=400)
+  plot.triangle.summary(array.trans.sim3.colors, "% ECMWF-S4 transition from blocking regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.trans.sim3.colors, "% ECMWF-S4 transition from blocking regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350)
+  plot.target.figure3(array.trans.sim3.colors, "% ECMWF-S4 transition from blocking regime", "Blocking", my.seq, trans.cols)
+  dev.off()
+
+  # Observed transition probability from blocking to X summary:
+  array.trans.obs3.colors <- values.to.colors(array.trans.obs3, my.seq, trans.cols, na.col="white")
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400)
+  plot.triangle.summary(array.trans.obs3.colors, "% Observed transition from blocking regime", my.seq, trans.cols)
+  dev.off()
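+  # Layout shared by all these color arrays: dimension 1 is the startdate month
+  # (Jan..Dec), dimension 2 the lead time (0..6 months, stored as indices 1..7)
+  # and dimension 3 the regime, in the positions given by reg.name/reg.dim, so
+  # for example dim(array.trans.obs3.colors) is c(12, 7, 4).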
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600)
+  plot.startdate.figure(array.trans.obs3.colors, "% Observed transition from blocking regime", my.seq, trans.cols)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350)
+  plot.target.figure3(array.trans.obs3.colors, "% Observed transition from blocking regime", "Blocking", my.seq, trans.cols)
+  dev.off()
+
+  # Simulated transition probability from Atlantic ridge to X summary:
+  array.trans.sim4.colors <- values.to.colors(array.trans.sim4, my.seq, trans.cols, na.col="white")
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400)
+  plot.triangle.summary(array.trans.sim4.colors, "% ECMWF-S4 transition from Atlantic ridge regime", my.seq, trans.cols)
+  dev.off()
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ +
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
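+ # Each (startdate, lead time) cell of the one-panel summaries above is split into
+ # left/bottom/right/top triangles by four polygon() calls; the same geometry wrapped
+ # in a helper, as a sketch (draw_cell is hypothetical, the script draws inline):
+ draw_cell <- function(p, l, cols4) {
+   polygon(c(p-0.5, p-0.5, p), c(l-0.5, l+0.5, l), col=cols4[1]) # left triangle
+   polygon(c(p-0.5, p, p+0.5), c(l-0.5, l, l-0.5), col=cols4[2]) # bottom triangle
+   polygon(c(p, p+0.5, p+0.5), c(l, l+0.5, l-0.5), col=cols4[3]) # right triangle
+   polygon(c(p-0.5, p, p+0.5), c(l+0.5, l, l+0.5), col=cols4[4]) # top triangle
+ }
+ # e.g. draw_cell(p, l, array.trans.obs4.colors[p,1+l,]) inside the double loop above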
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
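+ # In these target-month panels, column j shows lead time 6-j and row i the target
+ # month, so each cell is filled from the startdate month i2 = i-(6-j), wrapped into
+ # 1..12; the same mapping as a standalone sketch (startdate_month is hypothetical):
+ startdate_month <- function(target.month, lead.time) {
+   m <- target.month - lead.time
+   ifelse(m > 0, m, m + 12) # e.g. target January at lead 2 -> November startdate
+ }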
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]),
(imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + + + + + + + + + + + + + +if(monthly_anomalies) { + # Compute the climatology and the anomalies (with a LOESS filter) for the sfcWind data and draw the monthly anomalies, to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis: + + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # in leap years, take the March to December period shifted by one day to remove the 29th of February + } + + rm(sfcWind366) + gc() + + # LOESS anomalies: + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + +
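+ # The double loop above fits one LOESS curve per grid point to smooth the raw
+ # 365-day climatology; the same filtering on a single synthetic series, as a
+ # self-contained sketch (the sine-plus-noise series is an illustrative assumption,
+ # span=0.35 as in the loop; local() keeps the sketch out of the script's workspace):
+ local({
+   day <- 1:365
+   raw.clim <- 5 + 2*sin(2*pi*day/365) + rnorm(365, sd=0.5) # noisy daily climatology
+   smooth.clim <- predict(loess(raw.clim ~ day, span=0.35)) # filtered seasonal cycle
+ })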
rm(sfcWindClimDaily) + gc() + + sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + #year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + # wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.015, 0.09), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, 
var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)]) + + ColorBar(brks=my.brks.var[2:(l(my.brks.var)-1)], cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("m/s", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + # you could almost use the normal PlotEquiMap function below; it only needs a black contour line at anomaly=0 and smaller contour labels: + # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5) + + ##my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.37, 1, 0, 0.09), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + #mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + # mean wind speed and slp anomaly only during the days of the chosen year: + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + # wind anomaly for the chosen year, month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case 
a regime has no days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load the regime names of the chosen month: + load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData")) + + ## add strip with the daily sequence of WRs: + + mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name)) + mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name)) + mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name)) + mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name)) + + cluster1.name.short <- substr(cluster1.name,1,1) + cluster2.name.short <- substr(cluster2.name,1,1) + cluster3.name.short <- substr(cluster3.name,1,1) + cluster4.name.short <- substr(cluster4.name,1,1) + + ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime: + if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1) + if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2) + if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3) + if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4) + + c1 <- which(cluster.test == 1) + c2 <- which(cluster.test == 2) + c3 <- which(cluster.test == 3) + c4 <- which(cluster.test == 4) + + cluster.test.letters <- cluster.test + cluster.test.letters[c1] <- cluster1.name.short + cluster.test.letters[c2] <- cluster2.name.short + cluster.test.letters[c3] <- cluster3.name.short + cluster.test.letters[c4] <- cluster4.name.short + + cluster.col <- cluster.test.letters + cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1" + cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1" + cluster.col[which(cluster.test.letters == "B")] <- "White" + cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1" + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("fre.days",cluster1), fre1.days) + assign(paste0("fre.days",cluster2), fre2.days) + assign(paste0("fre.days",cluster3), fre3.days) + assign(paste0("fre.days",cluster4), fre4.days) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year, month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- 
slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has no days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) + + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + # save the strip with the daily regime series for the chosen month and year: + fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png") + png(filename=fileoutput.seq,width=1500,height=1850) + + plot.new() + + sep <- 0.03 + for(day in 1:n.days.in.a.period(month.test, 2001)){ + sep.cum <- (day-1)*sep + polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day]) + text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5) + text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=cluster.test.letters[day], cex=2) + + } + + dev.off() + + + + # save the average impact and sea level pressure only for the chosen month and year: + fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1500,height=2000) + + plot.new() + + par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE) + PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE) + PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE) + PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE) + PlotEquiMap2(imp.test4[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden} + + par(fig=c(0,0.33,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + 
ColorBar(my.brks.var[2:(length(my.brks.var)-1)], cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var[2],my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=1) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " sea level pressure anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " sea level pressure anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=1.8, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) + + n.days <- floor(n.days.in.a.period(month.test,1)) + + par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE) + mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE) + mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE) + mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8) + par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE) + mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8) + + + dev.off() + + + ## add the strip with the regime sequence over the average impact composition: + fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png") + fileoutput.both <- 
paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png") + + system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part of the strip + system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both)) + + + ## same image formatted for the catalogue: + fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test)) + + system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput)) + + + } # close for on month.test + } # close for on year.test + + +} # close if on monthly_anomalies + + + + + + + + + + diff --git a/old/weather_regimes_maps_v3.R b/old/weather_regimes_maps_v3.R new file mode 100644 index 0000000000000000000000000000000000000000..b6b951195a2b2961312020494aa9c53769a61980 --- /dev/null +++ b/old/weather_regimes_maps_v3.R @@ -0,0 +1,482 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e: wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf files +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' to create a .pdf or .png file that the user +# must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the sequence of the images +# in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' from F to T +# and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). +# During these first two runs, the variable 'save.names' must be set to TRUE, so an .RData file is written to store the order of the four regimes, in case +# in future a user needs to visualize these maps again. In this case, the user must set 'save.names= FALSE' and 'ordering=TRUE' to be able to visualize the maps +# in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can associate the reanalysis +# with the forecast system, employing it to automatically assign to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! 
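+# An example of the two-pass workflow described above (illustrative settings only, to adapt to your own clusters):
+#   1st run:    ordering <- F; save.names <- T   # open the output file and identify each cluster visually
+#   (then set the four 'clusterX.name' variables below to the regimes you identified, and)
+#   2nd run:    ordering <- T; save.names <- T   # saves the ordered maps and the "_ClusterNames.RData" file
+#   later runs: ordering <- T; save.names <- F   # reloads the stored cluster names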
+# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') +var.num <- 2 # if fields.name ='rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 12 # 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand. +lead.month <- 0 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names below). +as.pdf <- T # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +rean.dir <- "20) as 18) but for monthly WRs" # if fields.name='forecast.name', subdir of 'workdir' where the weather regimes computed with a reanalysis are stored. + +# Associate the regimes with the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.num.orig <- var.num # loading _mapdata files can change this value! 
+fields.name.orig <- fields.name + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + if(fields.name == forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + if(fields.name == forecast.name){ + # compute correlations between the simulated clusters and the observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) # load also reanalysis data, which overwrites the pslwrXmean variables + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # load also reanalysis regime names + + regimes.obs <- c(cluster1.name.period[p], cluster2.name.period[p], cluster3.name.period[p], cluster4.name.period[p]) # to be replaced soon by c(cluster1.name, cluster2.name, ...) 
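+    # The 16 explicit cor() calls below fill the 4x4 'cluster.corr' matrix; the same matching
+    # can be written compactly (a sketch only; 'sim.list', 'obs.list' and 'corr.compact' are
+    # illustrative names, not objects of this script):
+    # sim.list <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+    # obs.list <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+    # corr.compact <- sapply(obs.list, function(o) sapply(sim.list, function(s) cor(as.vector(s), as.vector(o))))
+    # apply(corr.compact, 1, which.max)  # row i: the observed regime best correlated with simulated cluster i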
+ + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis ones")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + cluster1.regime <- unname(which(cluster.corr[1,] == max(cluster.corr[1,]))) + cluster2.regime <- unname(which(cluster.corr[2,] == max(cluster.corr[2,]))) + cluster3.regime <- unname(which(cluster.corr[3,] == max(cluster.corr[3,]))) + cluster4.regime <- unname(which(cluster.corr[4,] == max(cluster.corr[4,]))) + + # associate each simulated cluster to one observed regime: + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) # reload forecast regime data + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + + } # close if on 'forecast.name' + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- 
c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + if(save.names){ + #cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c() + + #cluster1.name.period[p] <- cluster1.name + #cluster2.name.period[p] <- cluster2.name + #cluster3.name.period[p] <- cluster3.name + #cluster4.name.period[p] <- cluster4.name + + #if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + #if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + if(fields.name == rean.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, + file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, + # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis: + if(fields.name == rean.name) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + #cluster1.name <- 
cluster1.name.period[p] + #cluster2.name <- cluster2.name.period[p] + #cluster3.name <- cluster3.name.period[p] + #cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + # Visualize the composition with the 3 graphs for each of the 4 regimes: their pressure field, their impact on the chosen variable and their frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + 
mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=80 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA) + fre2.NA <- c(fre2.NA,NA) + fre3.NA <- c(fre3.NA,NA) + fre4.NA <- c(fre4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]) + fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]) + fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) fre1.NA + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, 
freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + 
if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # close the .png of this period + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + +# impact map of the strongest WR: +if(fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 1:12){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! 
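+    # The per-gridpoint double loop below picks, for each grid point, the regime with the largest
+    # absolute impact; an equivalent vectorized sketch (illustrative only; 'imp.stack' and
+    # 'imp.all.vec' are hypothetical names, and ties may resolve differently, since which.max()
+    # keeps the first maximum while the loop keeps the last):
+    # imp.stack <- abs(array(c(imp1, imp2, imp3, imp4), c(dim(imp1), 4)))
+    # imp.all.vec <- apply(imp.stack, c(1,2), which.max) + 0.5   # values 1.5..4.5, as in the loop below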
+ + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + + # impact map: + par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=c(1,2,3,4), my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} +
year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + 
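# (Aside: the match() on the next line gives the positions of the labelled values inside the full break vector; ColorBar3, a local function from Rfunctions.R, presumably uses this subset to decide which tick labels to draw. A small self-check of the idea, using the sfcWind breaks defined above: + stopifnot(identical(match(c(-3, 0, 3), seq(-3, 3, 0.5)), c(1L, 7L, 13L))) # labelled values sit at break positions 1, 7 and 13 + 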
my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving the 4 .png files + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + +# impact map of the strongest WR: +if(fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 1:12){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # at each grid point, mark the regime with the strongest absolute impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + + # impact map: + par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=c(1,2,3,4), my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR 
on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + diff --git a/old/weather_regimes_maps_v4.R b/old/weather_regimes_maps_v4.R new file mode 100644 index 0000000000000000000000000000000000000000..6868b03a534fc9975255dd3364087ba1b78f8174 --- /dev/null +++ b/old/weather_regimes_maps_v4.R @@ -0,0 +1,565 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' to create a .pdf or .png file that the user +# must open to visually find the order in which the four regimes are stored inside the four clusters. For example, they can find that the sequence of the images +# in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, they have to change 'ordering' from F to T +# and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case in future a user needs to visualize these maps again. In this case, the user must set +# 'ordering=TRUE' and 'save.names=FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis regimes are used +# to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +period <- 1 # 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created beforehand the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. 
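+ # (For orientation: 'my.period' is not defined in this script; it comes from Rfunctions.R. It is assumed to be a 17-element vector of period labels, presumably along the lines of: + # my.period <- c(month.name, "Winter", "Spring", "Summer", "Autumn", "Year") + # so that my.period[p] turns the index chosen above into the suffix used in the input and output file names.)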
+lead.month <- 3 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) +var.num <- 2 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and visually find the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- T # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.num.orig <- var.num # when loading _mapdata files, this value can change! 
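+# (Why the '_orig' copies: load() writes every object stored in an .RData file straight into the current environment, so if a '_mapdata.RData' file happens to contain objects named var.num, fields.name or period, loading it silently overwrites the values chosen above. A minimal illustration of the effect, with a hypothetical tmp.RData: +# x <- 1; save(x, file="tmp.RData"); x <- 2; load("tmp.RData"); x == 1 # TRUE: the saved value is back +# hence the restores after every load() below.)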
+fields.name.orig <- fields.name +period.orig <- period + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month 'p' and lead month 'lead.month'): + if(fields.name == rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + if(fields.name == forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + if(fields.name == forecast.name){ + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[load.month],"_mapdata.RData")) # also load the reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # also load the reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...) 
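+ # Illustrative sketch only, not used below: the cluster-to-regime association further down is a greedy one-to-one + # matching on the 4x4 correlation matrix, which repeatedly takes the global maximum and knocks out its row and column. + # The same logic in compact form: + greedy.match <- function(corr){ # corr: rows = simulated clusters, columns = observed regimes + assignment <- rep(NA_integer_, nrow(corr)) + for(k in 1:nrow(corr)){ + m <- which(corr == max(corr), arr.ind=TRUE)[1,] # position of the current global maximum + assignment[m[1]] <- m[2] # cluster m[1] -> regime m[2] + corr[m[1],] <- corr[,m[2]] <- -Inf # exclude its row and column from later passes + } + assignment # assignment[i] = index in regimes.obs matched to cluster i + }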
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis ones")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + # correlation of each simulated cluster with each observed regime anomaly: + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + for(i in 1:4) for(j in 1:4) cluster.corr[i,j] <- cor(as.vector(get(paste0("cluster",i,".sim"))), as.vector(get(paste0("pslwr",j,"mean")))) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) # reload forecast regime data + 
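# (The reanalysis load() above replaced the forecast anomalies and frequencies (pslwrXmean, wrXy) and the lat/lon vectors with the observed ones, which is why wrXy.obs was copied out first; the two reloads here bring back the forecast versions before mapping.) + 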
load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(period.orig != period) period <- period.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the original lat order + + } # close 'if' on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, + file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, + # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + # cluster1.name <- cluster1.name.period[p] + # cluster2.name <- cluster2.name.period[p] + # cluster3.name <- cluster3.name.period[p] + # cluster4.name <- cluster4.name.period[p] + } + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + 
assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, 
contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- 
match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 
0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving the 4 .png files + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + + + + +# impact map of the strongest WR: +if(fields.name == rean.name){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # at each grid point, mark the regime with the strongest absolute impact: + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v4.R~ b/old/weather_regimes_maps_v4.R~ new file mode 100644 index 0000000000000000000000000000000000000000..81b9026189faab056c6fc14fdbb9be818dbbf4f8 --- /dev/null +++ b/old/weather_regimes_maps_v4.R~ @@ -0,0 +1,565 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' to create a .pdf or .png file that the user +# must open to visually find the order in which the four regimes are stored inside the four clusters. For example, they can find that the sequence of the images +# in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, they have to change 'ordering' from F to T +# and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case in future a user needs to visualize these maps again. In this case, the user must set +# 'ordering=TRUE' and 'save.names=FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis regimes are used +# to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') +var.num <- 2 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 9 # 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). 
+ # You need to have created beforehand the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. +lead.month <- 3 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with ordering <- FALSE to look only at the cartography and visually find the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +#rean.dir <- "20) as 18) but for monthly WRs" # if fields.name='forecast.name', subdir of 'workdir' where the weather regimes computed with a reanalysis are stored. +rean.dir <- "18) as 12) but with no lat correction" + +# Associate the regimes to the four clusters: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.num.orig <- var.num # when loading _mapdata files, this value can change! 
+fields.name.orig <- fields.name +period.orig <- period + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month 'p' and lead month 'lead.month'): + if(fields.name == rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + if(fields.name == forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + if(fields.name == forecast.name){ + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[load.month],"_mapdata.RData")) # also load the reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # also load the reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...) 
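+ # (Rationale for the check on the next line: if two clusters ended up with the same regime name, the which(orden == clusterX.name) lookups further down would map two clusters onto the same slot and silently drop another regime, so the script stops hard instead.)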
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis ones")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + # correlation of each simulated cluster with each observed regime anomaly: + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + for(i in 1:4) for(j in 1:4) cluster.corr[i,j] <- cor(as.vector(get(paste0("cluster",i,".sim"))), as.vector(get(paste0("pslwr",j,"mean")))) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) # reload forecast regime data + 
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(!identical(period, period.orig)) period <- period.orig
+ if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+
+ } # close 'if' on forecast.name
+
+ if(fields.name == rean.name){
+ if(save.names){
+ save(cluster1.name, cluster2.name, cluster3.name, cluster4.name,
+ file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData"))
+ #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr,
+ # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData"))
+ } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+ load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData"))
+ #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData"))
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ # cluster1.name <- cluster1.name.period[p]
+ # cluster2.name <- cluster2.name.period[p]
+ # cluster3.name <- cluster3.name.period[p]
+ # cluster4.name <- cluster4.name.period[p]
+ }
+ }
+
+ # assign to each cluster its regime:
+ if(ordering == FALSE){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ if(fields.name == rean.name){
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+ }
+
+ if(fields.name == forecast.name){
+ assign(paste0("freMax",cluster1), wr1yMax)
+ assign(paste0("freMax",cluster2), wr2yMax)
+ assign(paste0("freMax",cluster3), wr3yMax)
+ assign(paste0("freMax",cluster4), wr4yMax)
+
+ assign(paste0("freMin",cluster1), wr1yMin)
+ assign(paste0("freMin",cluster2), wr2yMin)
+ assign(paste0("freMin",cluster3), wr3yMin)
+ assign(paste0("freMin",cluster4), wr4yMin)
+
+ cluster1.Obs <- which(orden == regimes.obs[1])
+ cluster2.Obs <- which(orden == regimes.obs[2])
+ cluster3.Obs <- which(orden == regimes.obs[3])
+ cluster4.Obs <- which(orden == regimes.obs[4])
+
+ assign(paste0("freObs",cluster1), wr1y.obs)
+ assign(paste0("freObs",cluster2), wr2y.obs)
+ assign(paste0("freObs",cluster3), wr3y.obs)
assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, 
contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- 
match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 
0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ }
+
+ if(!as.pdf) dev.off() # close the current .png (one file for each period)
+
+} # close 'p' on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+
+
+
+
+
+
+
+
+
+# impact map of the strongest WR:
+if(fields.name == rean.name){
+
+ var.num <- 1 # choose a variable (1:sfcWind, 2:tas)
+
+ png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600)
+
+ plot.new()
+ #par(mfrow=c(4,3))
+ col.regimes <- c("indianred1","cyan","khaki","lightpink")
+
+ for(p in 13:16){ # or 1:12 for monthly maps
+ load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+ load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+ # position of the longitude values of Europe only (without the Atlantic Ocean and America):
+ #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+ lon.max <- 45
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+
+ cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+ cluster2 <- which(orden == cluster2.name.period[p])
+ cluster3 <- which(orden == cluster3.name.period[p])
+ cluster4 <- which(orden == cluster4.name.period[p])
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ # at each grid point, flag the regime with the strongest absolute impact (ties go to the highest cluster number):
+ imp.all <- imp1
+ for(i in 1:dim(imp1)[1]){
+ for(j in 1:dim(imp1)[2]){
+ if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+ if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+ if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+ if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+ }
+ }
+
+ if(p<=3) yMod <- 0.7
+ if(p>=4 && p<=6) yMod <- 0.5
+ if(p>=7 && p<=9) yMod <- 0.3
+ if(p>=10) yMod <- 0.1
+ if(p==13 || p==14) yMod <- 0.5
+ if(p==15 || p==16) yMod <- 0.1
+
+ if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+ if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+ if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+ if(p==13 || p==15) xMod <- 0.05
+ if(p==14 || p==16) xMod <- 0.50
+
+ # impact map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+ if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+ PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+ # title map:
+ if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+ if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3,
yMod + 0.21 + 0.162, yMod + 0.21 + 0.172), new=TRUE)
+ mtext(my.period[p], font=2, cex=.9)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+ par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+ mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v5.R b/old/weather_regimes_maps_v5.R
new file mode 100644
index 0000000000000000000000000000000000000000..768043f40c6b6d5fcf2e0aedb052f7053e8d35de
--- /dev/null
+++ b/old/weather_regimes_maps_v5.R
@@ -0,0 +1,573 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the
+# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering'
+# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set
+# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis can be used
+# to automatically associate to each simulated cluster one of the observed regimes, the one with the highest spatial correlation.
+# Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with '_psl.RData' suffix:
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+period <- 1 # 1:12 # Choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+ # You need to have previously created the '_mapdata.RData' file for that period with the output of 'weather_regimes.R'.
+
+lead.month <- 0 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+ # (and the variable 'period' becomes the start month)
+
+# run it twice: once with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and find visually the right regime names of the four clusters; then,
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography.
+# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', not to overwrite the file which stores the cluster order:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- T # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+
+# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+# Associate the regimes with the four clusters:
+cluster2.name <- "NAO+"
+cluster4.name <- "NAO-"
+cluster1.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose the order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################################
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # when loading _mapdata files, this value can change!
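+
+# Illustration of the ordering mechanism above (comments only, duplicating the logic
+# applied later in this script): each cluster is drawn in the panel slot given by the
+# position of its regime name inside 'orden', e.g.:
+#
+# orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+# cluster1.name <- "Blocking"
+# which(orden == cluster1.name) # -> 3, so cluster 1 is drawn in the third panel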
+fields.name.orig <- fields.name
+period.orig <- period
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60))
+if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60))
+
+for(p in period){
+ # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+ if(fields.name == rean.name) {
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+ }
+ if(fields.name == forecast.name){
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # load cluster time series and mean psl data
+ load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",var.name[var.num],".RData")) # load mean var data
+ }
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ if(fields.name == forecast.name){
+ # compute correlations between simulated clusters and observed regime anomalies:
+ cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+ lat.forecast <- lat; lon.forecast <- lon
+
+ if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+ load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[load.month],"_mapdata.RData")) # load also reanalysis data, which overwrites the pslwrXmean variables
+ load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # load also the reanalysis regime names
+
+ wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+ regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...)
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+ if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis ones")}
+ if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+ if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+ cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+ cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+ cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+ cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+ cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+ cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+ cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+ cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+ cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+ cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+ cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+ cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+ cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+ cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+ cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+ cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+ # associate each simulated cluster to one observed regime:
+ max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+ cluster.corr2 <- cluster.corr
+ cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+ cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+ max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+ cluster.corr3 <- cluster.corr2
+ cluster.corr3[max2[1],] <- -999
+ cluster.corr3[,max2[2]] <- -999
+ max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+ cluster.corr4 <- cluster.corr3
+ cluster.corr4[max3[1],] <- -999
+ cluster.corr4[,max3[2]] <- -999
+ max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+ seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+ seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+ cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+ cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+ cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+ cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+ cluster1.name <- regimes.obs[cluster1.regime]
+ cluster2.name <- regimes.obs[cluster2.regime]
+ cluster3.name <- regimes.obs[cluster3.regime]
+ cluster4.name <- regimes.obs[cluster4.regime]
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) # reload forecast regime data
+ load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(period.orig != period) period <- period.orig
+ if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+
+ } # close 'if' on forecast.name
+
+ if(fields.name == rean.name){
+ if(save.names){
+ save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData"))
+ #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr,
+ # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData"))
+ } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+ load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData"))
+ #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData"))
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ # cluster1.name <- cluster1.name.period[p]
+ # cluster2.name <- cluster2.name.period[p]
+ # cluster3.name <- cluster3.name.period[p]
+ # cluster4.name <- cluster4.name.period[p]
+ }
+ }
+
+ # assign to each cluster its regime:
+ if(ordering == FALSE){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ assign(paste0("imp",cluster1), varwr1mean)
+ assign(paste0("imp",cluster2), varwr2mean)
+ assign(paste0("imp",cluster3), varwr3mean)
+ assign(paste0("imp",cluster4), varwr4mean)
+
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+
+
+ if(fields.name == forecast.name){
+ assign(paste0("freMax",cluster1), wr1yMax)
+ assign(paste0("freMax",cluster2), wr2yMax)
+ assign(paste0("freMax",cluster3), wr3yMax)
+ assign(paste0("freMax",cluster4), wr4yMax)
+
+ assign(paste0("freMin",cluster1), wr1yMin)
+ assign(paste0("freMin",cluster2), wr2yMin)
+ assign(paste0("freMin",cluster3), wr3yMin)
+ assign(paste0("freMin",cluster4), wr4yMin)
+
+ cluster1.Obs <- which(orden == regimes.obs[1])
+ cluster2.Obs <- which(orden == regimes.obs[2])
+ cluster3.Obs <- which(orden == regimes.obs[3])
+ cluster4.Obs <- which(orden == regimes.obs[4])
+
+ assign(paste0("freObs",cluster1), wr1y.obs)
+ assign(paste0("freObs",cluster2), wr2y.obs)
+ assign(paste0("freObs",cluster3), wr3y.obs)
+ assign(paste0("freObs",cluster4), wr4y.obs)
+ }
+
+
+ # position of the longitude
values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), 
new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- 
fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), 
new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), 
cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && period == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, 
cex=.9)
+
+ } # close 'p' on 'period'
+
+ par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+ ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+ par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+ mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+ dev.off()
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v5.R~ b/old/weather_regimes_maps_v5.R~
new file mode 100644
index 0000000000000000000000000000000000000000..4ed17bea4480bf33486b4f46ae235cc7852e3312
--- /dev/null
+++ b/old/weather_regimes_maps_v5.R~
@@ -0,0 +1,570 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F', to create a .pdf or .png file that the user
+# must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the sequence of the images
+# in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' from F to T
+# and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set
+# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis can be used
+# to automatically associate to each simulated cluster one of the observed regimes, the one with the highest spatial correlation.
+# Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with '_psl.RData' suffix:
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- rean.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+period <- 1:12 # 1:12 # Choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+ # You need to have previously created the '_mapdata.RData' file for that period with the output of 'weather_regimes.R'.
+var.num <- 1 # if fields.name ='rean.name', Choose a variable for the impact maps 1: sfcWind 2: tas +lead.month <- 3 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a the second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE and 'save.names = FALSE', not to overwrite the file which stores the cluster order: +ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- T # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- T # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associates the regimes to the four cluster: +cluster3.name <- "NAO+" +cluster2.name <- "NAO-" +cluster1.name <- "Blocking" +cluster4.name <- "Atl.Ridge" + +# choose an order to give to the cartograpy, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # loading _mapdata files, this value can change! 
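The "run it twice" recipe in the comments above reduces to three flag configurations: (1) ordering=FALSE, save.names=TRUE to discover which regime sits in which cluster; (2) ordering=TRUE, save.names=TRUE to save the association and plot the ordered maps; (3) ordering=TRUE, save.names=FALSE for any later re-run. A small guard like the sketch below, placed inside the loop over 'p', would protect the stored association against accidental overwrites (hypothetical addition, not in the original script; 'names.file' mirrors the path later passed to save()):

names.file <- paste0(workdir, "/", fields.name, "_", my.period[p], "_ClusterNames.RData")
if(save.names && file.exists(names.file))
  warning("about to overwrite the stored cluster order in ", names.file)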
+fields.name.orig <- fields.name +period.orig <- period + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period[1]],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period[1]],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + # load regime data (for forecasts, it load only the data correpsonding to the chosen start month p and lead month 'lead.month':: + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",var.name[var.num],".RData")) + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + if(fields.name == forecast.name){ + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[load.month],"_mapdata.RData")) # load also reanalysis data, which overwrities the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # load also reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frecuencies of the regimes + + regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...) 
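The lead-month wrap-around a few lines above ("if((p + lead.month) > 12) ...") is ordinary modular arithmetic; an equivalent one-liner, shown only as a sketch, which also stays correct for lead months longer than a year:

load.month <- ((p + lead.month - 1) %% 12) + 1  # same result for p in 1:12 and any lead.month >= 0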
+ if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalsyis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order than renalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitude are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) # reload forecast regime data + 
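The max1..max4 blocks above, with their -999 masking, implement a greedy one-to-one matching between simulated clusters and observed regimes. A compact equivalent sketch ('greedy.match' is a hypothetical helper, not part of the original code; it assumes the 4x4 'cluster.corr' matrix built above):

greedy.match <- function(corr) {
  match.obs <- rep(NA, nrow(corr))
  for(k in 1:nrow(corr)) {
    m <- which(corr == max(corr), arr.ind = TRUE)[1, ]  # best remaining (cluster, regime) pair
    match.obs[m[1]] <- m[2]
    corr[m[1], ] <- -Inf  # mask the matched simulated cluster...
    corr[, m[2]] <- -Inf  # ...and the matched observed regime
  }
  match.obs  # observed regime index for simulated clusters 1..4
}
# e.g. cluster1.name <- regimes.obs[greedy.match(cluster.corr)[1]]

Note that, like the original blocks, this maximises the correlations greedily one pair at a time; it does not maximise their global sum (that would need e.g. the Hungarian algorithm).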
load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(period.orig != period) period <- period.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + + } # close for on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, + # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + # cluster1.name <- cluster1.name.period[p] + # cluster2.name <- cluster2.name.period[p] + # cluster3.name <- cluster3.name.period[p] + # cluster4.name <- cluster4.name.period[p] + } + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # position of long 
values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), 
new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- 
fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), 
new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), 
cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # 
close 'p' on 'period'
+
+  par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+  ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+  par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+  mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+  dev.off()
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v6.R b/old/weather_regimes_maps_v6.R
new file mode 100644
index 0000000000000000000000000000000000000000..be173e5b8bda9e4837feec22897b390ffd1749ed
--- /dev/null
+++ b/old/weather_regimes_maps_v6.R
@@ -0,0 +1,582 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e.: wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+#      Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png
+#              file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, he can find that the
+#              sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering'
+#              from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+#              The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+#              "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+#              is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set
+#              'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+#              If your regimes derive from a forecast system, you must first compute the regimes derived from a reanalysis. Then, you can associate the reanalysis
+#              with the forecast system, using it to automatically associate each simulated cluster with one of the
+#              observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the '_psl.RData' suffix
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas)
+
+period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+ # You need to have created before the '_mapdata.RData' file with the output of 'weather_regimes'.R for that period. + +lead.month <- 1 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a the second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE and 'save.names = FALSE', not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/month + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associates the regimes to the four cluster: +cluster2.name <- "NAO+" +cluster4.name <- "NAO-" +cluster1.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartograpy, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # loading _psl files, this value can change! 
+fields.name.orig <- fields.name +period.orig <- period +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + p.orig <- p + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month':: + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + if(fields.name == forecast.name){ + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_psl.RData")) # load reanalysis data, which overwrities the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # load also reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frecuencies of the regimes + + #regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...) 
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalsyis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order than renalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitude are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(p.orig != p) p <- p.orig + + 
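The recurring "ripristinate" (i.e. restore) blocks are needed because load() writes the saved objects directly into the calling environment, clobbering script variables such as var.num, fields.name, period and p. A less fragile pattern, sketched below with base R only (illustrative, not part of the original code), loads each file into a private environment and copies out only what is needed:

e <- new.env()
load(paste0(workdir, "/", rean.dir, "/", rean.name, "_", my.period[load.month], "_psl.RData"), envir = e)
pslwr1mean.obs <- e$pslwr1mean  # nothing in the script's own workspace is overwritten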
load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + + } # close for on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, + # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + # cluster1.name <- cluster1.name.period[p] + # cluster2.name <- cluster2.name.period[p] + # cluster3.name <- cluster3.name.period[p] + # cluster4.name <- cluster4.name.period[p] + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && fields.name == rean.name){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(period==100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- 
which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, 
lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(period==100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), 
cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), 
col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, 
legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + + + + +# impact map of the strongest WR: +if(fields.name == rean.name && period == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v6.R~ b/old/weather_regimes_maps_v6.R~ new file mode 100644 index 0000000000000000000000000000000000000000..bc7e94de6e702677d713f1f9faf16038acd3a148 --- /dev/null +++ b/old/weather_regimes_maps_v6.R~ @@ -0,0 +1,576 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, one might find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, the user has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis regimes can be used to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 9 # 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have previously created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. + +lead.month <- 3 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- T # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster2.name <- "NAO+" +cluster4.name <- "NAO-" +cluster1.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading '_mapdata' files, this value can change!
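+# --- Editor's note: illustrative sketch, not part of the original script -----
+# The ordering machinery used further below reduces to looking up each cluster's
+# regime name inside 'orden' to obtain its display row (row 1 = top of the page).
+# A tiny helper showing the idea; it is defined here but never called:
+regime.row <- function(regime.name, regime.order = orden) which(regime.order == regime.name)
+# e.g. regime.row("Blocking") returns 3 with the 'orden' above, so the cluster
+# whose name is "Blocking" is drawn in the third row of maps from the top.
+# ------------------------------------------------------------------------------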
+fields.name.orig <- fields.name +period.orig <- period +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + if(fields.name == forecast.name){ + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_psl.RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # also load the reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + #regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...)
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # reload forecast regime data +
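+    # --- Editor's note: illustrative sketch, not part of the original script --
+    # The block above pairs each simulated cluster with one observed regime
+    # greedily: it repeatedly takes the largest remaining correlation and masks
+    # that row and column with -999. The same idea as a reusable function,
+    # assuming a square correlation matrix like 'cluster.corr' (defined here but
+    # never called, so the script's behaviour is unchanged):
+    greedy.match <- function(corr.mat){
+      n <- nrow(corr.mat)
+      pairing <- integer(n)            # pairing[i] = regime matched to cluster i
+      for(s in 1:n){
+        pos <- which(corr.mat == max(corr.mat), arr.ind=TRUE)[1,]
+        pairing[pos[1]] <- pos[2]
+        corr.mat[pos[1],] <- -999      # this cluster cannot be matched again
+        corr.mat[,pos[2]] <- -999      # this regime cannot be matched again
+      }
+      pairing                          # e.g. greedy.match(cluster.corr)
+    }
+    # --------------------------------------------------------------------------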
load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(period.orig != period) period <- period.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + + } # close 'if' on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, + # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + # cluster1.name <- cluster1.name.period[p] + # cluster2.name <- cluster2.name.period[p] + # cluster3.name <- cluster3.name.period[p] + # cluster4.name <- cluster4.name.period[p] + } + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # position of the longitude
values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), 
new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- 
fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4[-c(1,2)], year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), 
new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), 
cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + + + + +# impact map of the strongest WR: +if(fields.name == rean.name && period == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! + + # position of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v7.R b/old/weather_regimes_maps_v7.R new file mode 100644 index 0000000000000000000000000000000000000000..b73485b4716b84b61019a5646065c75732a7807b --- /dev/null +++ b/old/weather_regimes_maps_v7.R @@ -0,0 +1,625 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData". +# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, one might find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, the user has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, the reanalysis regimes can be used to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" #/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 8 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+ # You need to have previously created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. + +lead.month <- 4 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below). +save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading '_psl' files, this value can change!
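+# --- Editor's note: illustrative sketch, not part of the original script -----
+# The frequency plots further below insert NA into the freX series for every
+# year missing from the forecast, using an explicit loop and a counter 'k'. The
+# same padding can be written in vectorized form; 'freq' stands for one of the
+# freX vectors (one value per year actually present in 'years', all of them
+# assumed to fall inside year.start:year.end). Defined here but never called:
+pad.missing.years <- function(freq, years, year.start, year.end){
+  padded <- rep(NA_real_, year.end - year.start + 1)  # one slot per year of the period
+  padded[match(years, year.start:year.end)] <- freq   # fill the years that are present
+  padded                                              # NA remains where a year is absent
+}
+# ------------------------------------------------------------------------------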
+fields.name.orig <- fields.name +period.orig <- period; workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(as.pdf && fields.name == rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name == forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + p.orig <- p + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name){ + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_psl.RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # also load the reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + #regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...)
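+    # --- Editor's note: illustrative sketch, not part of the original script --
+    # The sixteen explicit assignments a few lines below fill the 4x4 matrix
+    # 'cluster.corr' one cell at a time. For reference, the same matrix could be
+    # built with two loops, fetching the clusterX.sim / pslwrXmean objects by
+    # name; kept as a comment so that the script's behaviour is unchanged:
+    # for(i in 1:4) for(j in 1:4) cluster.corr[i,j] <- cor(as.vector(get(paste0("cluster",i,".sim"))), as.vector(get(paste0("pslwr",j,"mean"))))
+    # --------------------------------------------------------------------------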
+ regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore
original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(p.orig != p) p <- p.orig + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order + + } # close 'if' on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && fields.name == rean.name){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(period==100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # position of the longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to
continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, 
intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(period==100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + 
par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) 
barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + corr1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",corr1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + corr2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",corr2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + corr3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",corr3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + corr4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",corr4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + 
legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving the 4 .png files + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + +# Summary graphs: +if(fields.name == forecast.name && period == 100){ + + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=500, height=600) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + } + + plot(seq(0, 12, 0.5), c(0,rep(6,length(seq(0,12,0.5))-1)), type = "n") + polygon(seq(1,12,0.5), rep(2,23),col="red") + +} + + +# impact map of the strongest WR: +if(fields.name == rean.name && period == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # note: the cluster names should not depend on var.name!
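+ # note on the 'EU' index defined below: longitudes run from 0 to 360 degrees, so continental Europe is obtained by wrapping around the Greenwich meridian, i.e. joining the first grid columns (0 to lon.max degrees E) with the last ones (337 to 360 degrees E, that is 23W to 0W)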
+ + # position of longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # imp.all labels each grid point with the regime whose impact has the largest absolute value (the values 1.5 to 4.5 fall inside the color bins of brks=1:5): + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v7.R~ b/old/weather_regimes_maps_v7.R~ new file mode 100644 index 0000000000000000000000000000000000000000..affc7405ed869ad7d7596379370b3a38890794e1 --- /dev/null +++ b/old/weather_regimes_maps_v7.R~ @@ -0,0 +1,586 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png +# file that the user must open to visually identify the order in which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you have to compute the regimes derived from a reanalysis beforehand. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- rean.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas) + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand. + +lead.month <- 0 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and visually identify the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
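+# In short: 1st run: ordering=F, save.names=T (identify the regime names); 2nd run: ordering=T, save.names=T (save the ordered names); any later run: ordering=T, save.names=F.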
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster2.name <- "NAO+" +cluster4.name <- "NAO-" +cluster1.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period; workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + p.orig <- p + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + if(fields.name == forecast.name){ + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+ load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_psl.RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_ClusterNames.RData")) # load also the reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + #regimes.obs <- c(cluster1.name.period[load.month], cluster2.name.period[load.month], cluster3.name.period[load.month], cluster4.name.period[load.month]) # to be replaced soon by c(cluster1.name, cluster2.name, ...) + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+ cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(p.orig != p) p <- p.orig + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the correct lat order + + } # close for on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name) save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, + # file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_ClusterNames.RData")) + #if(fields.name == forecast.name && auto==F) load(paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + # cluster1.name <- cluster1.name.period[p] + # cluster2.name <- cluster2.name.period[p] + # cluster3.name <- cluster3.name.period[p] + # cluster4.name <- cluster4.name.period[p] + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && fields.name == rean.name){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(period==100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) +
assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) 
png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(period==100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, 
brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) 
+ if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && period == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
+ + # position of longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v8.R b/old/weather_regimes_maps_v8.R new file mode 100644 index 0000000000000000000000000000000000000000..6f2e304e566d96f5a5155e432377fa706bf16751 --- /dev/null +++ b/old/weather_regimes_maps_v8.R @@ -0,0 +1,861 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png +# file that the user must open to visually identify the order in which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you have to compute the regimes derived from a reanalysis beforehand. Then, you can associate the reanalysis +# to the forecast system, employing it to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" #/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1:12 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas) + +period <- 1 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand. +lead.month <- 0 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and visually identify the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # when loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period; workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(as.pdf && fields.name == rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name == forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + #p <- 1 # for the debug + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name){ + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[1]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ...
, day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regimes' anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + # insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + #if(p= && lead.month= ) clusterX.name=... 
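+
+    # The row/column elimination above is a greedy matching on the correlation matrix;
+    # the same logic as a compact, reusable helper (a sketch only, not wired in here):
+    greedy.match <- function(corr.mat){
+      assoc <- rep(NA, nrow(corr.mat))
+      for(i in seq_len(nrow(corr.mat))){
+        m <- which(corr.mat == max(corr.mat), arr.ind=TRUE)[1,]
+        assoc[m[1]] <- m[2]        # simulated cluster m[1] -> observed regime m[2]
+        corr.mat[m[1],] <- -Inf    # exclude this row from later maxima
+        corr.mat[,m[2]] <- -Inf    # exclude this column from later maxima
+      }
+      assoc  # e.g. cluster1.name would be regimes.obs[greedy.match(cluster.corr)[1]]
+    }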
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig,period)) period <- period.orig
+    if(p.orig != p) p <- p.orig
+
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig, period)) period <- period.orig
+    if(p.orig != p) p <- p.orig
+    if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+
+  } # close 'if' on 'forecast.name'
+
+  if(fields.name == rean.name){
+    if(save.names){
+      save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+    } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+      load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    }
+  }
+
+  # assign to each cluster its regime:
+  if(ordering == FALSE && fields.name == rean.name){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  if(period==100){
+    assign(paste0("imp",cluster1), varwr1mean)
+    assign(paste0("imp",cluster2), varwr2mean)
+    assign(paste0("imp",cluster3), varwr3mean)
+    assign(paste0("imp",cluster4), varwr4mean)
+
+    assign(paste0("sig",cluster1), pvalue1)
+    assign(paste0("sig",cluster2), pvalue2)
+    assign(paste0("sig",cluster3), pvalue3)
+    assign(paste0("sig",cluster4), pvalue4)
+  }
+
+  if(fields.name == forecast.name){
+    assign(paste0("freMax",cluster1), wr1yMax)
+    assign(paste0("freMax",cluster2), wr2yMax)
+    assign(paste0("freMax",cluster3), wr3yMax)
+    assign(paste0("freMax",cluster4), wr4yMax)
+
+    assign(paste0("freMin",cluster1), wr1yMin)
+    assign(paste0("freMin",cluster2), wr2yMin)
+    assign(paste0("freMin",cluster3), wr3yMin)
+    assign(paste0("freMin",cluster4), wr4yMin)
+
+    cluster1.Obs <- which(orden == regimes.obs[1])
+    cluster2.Obs <- which(orden == regimes.obs[2])
+    cluster3.Obs <- which(orden == regimes.obs[3])
+    cluster4.Obs <- which(orden == regimes.obs[4])
+
+    assign(paste0("freObs",cluster1), wr1y.obs)
+    assign(paste0("freObs",cluster2), wr2y.obs)
+    assign(paste0("freObs",cluster3), wr3y.obs)
+    assign(paste0("freObs",cluster4), wr4y.obs)
+
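+    # The pers1..pers4 values assigned below were bookkept earlier with sequ1..sequ4; an
+    # equivalent, more compact formulation via run-length encoding (a sketch, not wired in)
+    # that could also provide the observed persistence (persObs) once it is needed:
+    mean.spell.length <- function(cluster.series, k){
+      runs <- rle(cluster.series)            # lengths of the consecutive runs of each cluster
+      mean(runs$lengths[runs$values == k])   # mean spell length (days) spent in regime k
+    }
+    # e.g. pers1 is mean.spell.length(my.cluster.vector, 1)
+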
+    assign(paste0("persist",cluster1), pers1)
+    assign(paste0("persist",cluster2), pers2)
+    assign(paste0("persist",cluster3), pers3)
+    assign(paste0("persist",cluster4), pers4)
+
+    #assign(paste0("persistObs",cluster1), persObs1)
+    #assign(paste0("persistObs",cluster2), persObs2)
+    #assign(paste0("persistObs",cluster3), persObs3)
+    #assign(paste0("persistObs",cluster4), persObs4)
+
+  }
+
+
+  # positions of the longitude values of Europe only (without the Atlantic Ocean and America):
+  #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only
+  EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only
+
+  # breaks and colors of the geopotential fields:
+  if(psl == "g500"){
+    #my.brks <- c(-300,-199:200,300) # mean anomaly of a WR (m)
+    my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR (m)
+    my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR (m) for the contour lines
+    #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+    values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+  }
+
+  if(psl == "psl"){
+    my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR (hPa)
+
+    my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR (hPa) for the contour lines
+    values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+  }
+
+  my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+  my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+  # breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+  #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20)
+  #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20)
+  #my.brks.var <- c(-20,seq(-3,3,0.5),20)
+  if(var.name[var.num] == "sfcWind") {
+    my.brks.var <- seq(-3,3,0.5)
+    values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+  }
+  if(var.name[var.num] == "tas") {
+    my.brks.var <- seq(-5,5,1)
+    values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+  }
+
+  #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+  my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+  #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+  # Visualize the composition with the 3 graphs for all the 4 regimes: their pressure fields, their impact on var and their frequency series:
+  if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700)
+  if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700)
+
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+
+  if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""}
+  mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2)
+
+  # Centroid maps:
+  map.xpos <- 0
+
map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(period==100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + 
PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, 
cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + 
par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + #if(fields.name == forecast.name) save(orden, cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector:
+  if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, pers1, pers2, pers3, pers4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+
+} # close 'p' on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+
+# Summary graphs:
+if(fields.name == forecast.name && period == 100){
+  array.cor <- array.diff.freq <- array.rpss <- array.pers <- array(NA,c(12,7,4)) # arrays storing the summary scores in the format: [startdate, leadmonth, regime]
+  # the third index is the triangle position in the summary plots below: 1 = left, 2 = bottom, 3 = right, 4 = top
+
+  for(p in 1:12){
+    p.orig <- p
+
+    for(l in 0:6){
+
+      load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      #cluster1 <- which(orden == cluster1.name)
+      #cluster2 <- which(orden == cluster2.name)
+      #cluster3 <- which(orden == cluster3.name)
+      #cluster4 <- which(orden == cluster4.name)
+
+      array.cor[p,1+l, 1] <- sp.cor1 # NAO+
+      array.cor[p,1+l, 3] <- sp.cor2 # NAO-
+      array.cor[p,1+l, 4] <- sp.cor3 # Blocking
+      array.cor[p,1+l, 2] <- sp.cor4 # Atl.Ridge
+
+      array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+      array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+      array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+      array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+      # simulated mean persistence (days) of each regime; the observed persistence (persObs)
+      # is not computed yet, so the persistence summary below shows the S4 values only:
+      array.pers[p,1+l, 1] <- pers1 # NAO+
+      array.pers[p,1+l, 3] <- pers2 # NAO-
+      array.pers[p,1+l, 4] <- pers3 # Blocking
+      array.pers[p,1+l, 2] <- pers4 # Atl.Ridge
+
+      # FairRPSS not computed yet: keep NA placeholders for now (see the fair.rps sketch
+      # below); its summary plot is skipped while all values are NA:
+      array.rpss[p,1+l, 1] <- NA # NAO+
+      array.rpss[p,1+l, 3] <- NA # NAO-
+      array.rpss[p,1+l, 4] <- NA # Blocking
+      array.rpss[p,1+l, 2] <- NA # Atl.Ridge
+    }
+  }
+
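+  # A minimal sketch of a fair ranked probability score for the 4 regime categories,
+  # following Ferro (2014), to eventually fill array.rpss (hypothetical helper, not wired
+  # in: 'ens' would be a [forecasts x 4] matrix of member counts per regime built from
+  # my.cluster.array, and 'obs.reg' the observed regime index of each forecast):
+  fair.rps <- function(ens, obs.reg){
+    m <- rowSums(ens)                                  # ensemble size of each forecast
+    E <- t(apply(ens, 1, cumsum))                      # cumulative member counts per category
+    O <- t(sapply(obs.reg, function(k) as.numeric(seq_len(ncol(ens)) >= k)))  # cumulative obs indicator
+    rowSums((E/m - O)^2 - E*(m - E)/(m^2*(m - 1)))     # fair adjustment; needs m > 1
+  }
+  # FairRPSS would then be 1 - mean(fair.rps(ens, obs.reg))/mean(rps.clim), with rps.clim
+  # the RPS of the climatological regime frequencies, and the result stored in array.rpss.
+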
+
+  # Corr summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+  # create an array similar to array.cor but with colors instead of corr. values:
+  corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  array.cor.colors <- array(corr.cols[8],c(12,7,4))
+  array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+  array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+  array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+  array.cor.colors[array.cor < 0] <- corr.cols[4]
+  array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+  array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+  array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+  for(p in 1:12){
+    for(l in 0:6){
+      polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # left triangle
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) # bottom triangle
+      polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) # right triangle
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) # top triangle
+    }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+  dev.off()
+
+
+  # Delta freq. summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400)
+
+  # create an array similar to array.diff.freq but with colors instead of frequencies:
+  freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+  array.freq.colors <- array(freq.cols[8],c(12,7,4))
+  array.freq.colors[array.diff.freq < 10] <- freq.cols[7]
+  array.freq.colors[array.diff.freq < 5] <- freq.cols[6]
+  array.freq.colors[array.diff.freq < 2] <- freq.cols[5]
+  array.freq.colors[array.diff.freq < 0] <- freq.cols[4]
+  array.freq.colors[array.diff.freq < -2] <- freq.cols[3]
+  array.freq.colors[array.diff.freq < -5] <- freq.cols[2]
+  array.freq.colors[array.diff.freq < -10] <- freq.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+  for(p in 1:12){
+    for(l in 0:6){
+      polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # left triangle
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) # bottom triangle
+      polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) # right triangle
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) # top triangle
+    }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+  dev.off()
+
+
+  # Persistence summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400)
+
+  # create an array similar to array.pers but with colors instead of persistence values;
+  # array.pers holds the mean S4 persistence in days (see above), so the bins below are
+  # placeholder persistence bins, to be turned back into difference bins once persObs exists:
+  pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+  array.pers.colors <- array(pers.cols[8],c(12,7,4))
+  array.pers.colors[which(array.pers < 15)] <- pers.cols[7]
+  array.pers.colors[which(array.pers < 10)] <- pers.cols[6]
+  array.pers.colors[which(array.pers < 7)] <- pers.cols[5]
+  array.pers.colors[which(array.pers < 5)] <- pers.cols[4]
+  array.pers.colors[which(array.pers < 3)] <- pers.cols[3]
+  array.pers.colors[which(array.pers < 2)] <- pers.cols[2]
+  array.pers.colors[which(array.pers < 1)] <- pers.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Mean persistence (days) of the S4 regimes", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+  for(p in 1:12){
+    for(l in 0:6){
+      polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # left triangle
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) # bottom triangle
+      polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) # right triangle
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) # top triangle
+    }
+  }
+
+  par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ColorBar2(brks = c(0,1,2,3,5,7,10,15,20), cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(0,1,2,3,5,7,10,15,20)), my.labels=c(0,1,2,3,5,7,10,15,20))
+  dev.off()
+
+
+  # FairRPSS summary (skipped while array.rpss only holds the NA placeholders set above):
+  if(any(!is.na(array.rpss))){
+    png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400)
+
+    # create an array similar to array.rpss but with colors instead of skill scores
+    # (same bins as the correlation summary, since FairRPSS is bounded above by 1):
+    rpss.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+
+    array.rpss.colors <- array(rpss.cols[8],c(12,7,4))
+    array.rpss.colors[which(array.rpss < 0.75)] <- rpss.cols[7]
+    array.rpss.colors[which(array.rpss < 0.5)] <- rpss.cols[6]
+    array.rpss.colors[which(array.rpss < 0.25)] <- rpss.cols[5]
+    array.rpss.colors[which(array.rpss < 0)] <- rpss.cols[4]
+    array.rpss.colors[which(array.rpss < -0.25)] <- rpss.cols[3]
+    array.rpss.colors[which(array.rpss < -0.5)] <- rpss.cols[2]
+    array.rpss.colors[which(array.rpss < -0.75)] <- rpss.cols[1]
+
+    par(mar=c(5,4,4,4.5))
+    plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+    title("FairRPSS of the S4 regime frequencies (reference: ERA-Interim climatology)", cex=1.5)
+    mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+    mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+    axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+    axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+    for(p in 1:12){
+      for(l in 0:6){
+        polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.rpss.colors[p,1+l,1]) # left triangle
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.rpss.colors[p,1+l,2]) # bottom triangle
+        polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.rpss.colors[p,1+l,3]) # right triangle
+        polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.rpss.colors[p,1+l,4]) # top triangle
+      }
+    }
+
+    par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+    ColorBar2(brks = seq(-1,1,0.25), cols = rpss.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25))
+    dev.off()
+  }
+
+}
+
+
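+# The "most influential WR" block below picks, at each grid point, the regime with the
+# largest absolute mean impact through a double loop over the grid indices; an equivalent
+# vectorized formulation (a sketch only, not wired in; ties are broken by the first regime
+# instead of the last one as in the loop version):
+dominant.regime <- function(imp1, imp2, imp3, imp4){
+  s <- array(c(abs(imp1), abs(imp2), abs(imp3), abs(imp4)), dim=c(dim(imp1), 4))
+  apply(s, c(1,2), which.max) + 0.5   # 1.5/2.5/3.5/4.5, the same codes used for imp.all
+}
+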
+# impact map of the strongest WR:
+if(fields.name == rean.name && period == 100){
+
+  var.num <- 1 # choose a variable (1:sfcWind, 2:tas)
+
+  png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600)
+
+  plot.new()
+  #par(mfrow=c(4,3))
+  col.regimes <- c("indianred1","cyan","khaki","lightpink")
+
+  for(p in 13:16){ # or 1:12 for monthly maps
+    load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData"))
+    load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+    # positions of the longitude values of Europe only (without the Atlantic Ocean and America):
+    #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only
+    lon.max = 45
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only
+
+    cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+    cluster2 <- which(orden == cluster2.name.period[p])
+    cluster3 <- which(orden == cluster3.name.period[p])
+    cluster4 <- which(orden == cluster4.name.period[p])
+
+    assign(paste0("imp",cluster1), varPeriodAnom1mean)
+    assign(paste0("imp",cluster2), varPeriodAnom2mean)
+    assign(paste0("imp",cluster3), varPeriodAnom3mean)
+    assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+    imp.all <- imp1
+    for(i in 1:dim(imp1)[1]){
+      for(j in 1:dim(imp1)[2]){
+        if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+        if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+        if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+        if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+      }
+    }
+
+    if(p<=3) yMod <- 0.7
+    if(p>=4 && p<=6) yMod <- 0.5
+    if(p>=7 && p<=9) yMod <- 0.3
+    if(p>=10) yMod <- 0.1
+    if(p==13 || p==14) yMod <- 0.5
+    if(p==15 || p==16) yMod <- 0.1
+
+    if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+    if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+    if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+    if(p==13 || p==15) xMod <- 0.05
+    if(p==14 || p==16) xMod <- 0.50
+
+    # impact map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+    if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+    PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+    # title map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+    if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE)
+    mtext(my.period[p], font=2, cex=.9)
+
+  } # close 'p' on the seasons
+
+  par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+  ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+  par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+  mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+  dev.off()
+}
+
+
+
diff --git a/old/weather_regimes_maps_v8.R~ b/old/weather_regimes_maps_v8.R~
new file mode 100644
index 0000000000000000000000000000000000000000..7ab8589774db974a37fe1abb557671ce038a2560
--- /dev/null
+++ b/old/weather_regimes_maps_v8.R~
@@ -0,0 +1,811 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen cliamte variable (i.e: wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, with the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, he can find that the +# sequence of the images in the file (from top to down) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, he has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second time, you have to run this script only to get the maps in the right order, which by default is, from top to down: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case in future a user needs to visualize these maps again. In this case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you have to compute before the regimes derived from a reanalysis. Then, you can associate the reanalysis +# to the forecast system, to employ it to automatically associate to each simulated cluster one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" #/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name ='rean.name', Choose a variable for the impact maps 1: sfcWind 2: tas + +period <- 1 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created before the '_mapdata.RData' file with the output of 'weather_regimes'.R for that period. +lead.month <- 0 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month of the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a the second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE and 'save.names = FALSE', not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below). 
+save.names <- F # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associates the regimes to the four cluster: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartograpy, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period; workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +if(as.pdf && fields.name == rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name == forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + p.orig <- p + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month':: + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # ripristinate original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name){ + # compute cluster persistence: + my.cluster + + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; 
cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrities the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frecuencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalsyis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order than renalysis's")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitude are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- 
seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + # insert here all the manual corrections to the regime assignation due to misasignment in the automatic selection: + #if(p= && lead.month= ) clusterX.name=... + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(p.orig != p) p <- p.orig + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + + } # close for on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && fields.name == rean.name){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(period==100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) 
+ assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, 
font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(period==100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 
0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), 
col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), 
cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + #if(fields.name == forecast.name) save(orden, cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector:
+ if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData"))
+
+} # close 'p' on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all months/seasons
+
+
+
+
+
+
+
+# Summary graphs:
+if(fields.name == forecast.name && period == 100){
+  array.cor <- array.diff.freq <- array.rpss <- array.pers <- array(NA,c(12,7,4)) # arrays storing the summary scores, in the format [startdate, leadmonth, regime]
+
+  for(p in 1:12){
+    p.orig <- p
+
+    for(l in 0:6){
+
+      load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      #cluster1 <- which(orden == cluster1.name)
+      #cluster2 <- which(orden == cluster2.name)
+      #cluster3 <- which(orden == cluster3.name)
+      #cluster4 <- which(orden == cluster4.name)
+
+      array.cor[p,1+l, 1] <- sp.cor1 # NAO+
+      array.cor[p,1+l, 3] <- sp.cor2 # NAO-
+      array.cor[p,1+l, 4] <- sp.cor3 # Blocking
+      array.cor[p,1+l, 2] <- sp.cor4 # Atl.Ridge
+
+      array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+
+      array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO-
+      array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking
+      array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge
+
+      # the RPSS is not computed yet; keep these assignments commented out, otherwise the dangling '<-' chain is a syntax error:
+      #array.rpss[p,1+l, 1] <- # NAO+
+      #array.rpss[p,1+l, 3] <- # NAO-
+      #array.rpss[p,1+l, 4] <- # Blocking
+      #array.rpss[p,1+l, 2] <- # Atl.Ridge
+    }
+  }
+
+
+
+  # Corr summary:
+  png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400)
+
+  # create an array similar to array.cor but with colors instead of corr. values:
+  corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+  array.cor.colors <- array(corr.cols[8],c(12,7,4))
+  array.cor.colors[array.cor < 0.75] <- corr.cols[7]
+  array.cor.colors[array.cor < 0.5] <- corr.cols[6]
+  array.cor.colors[array.cor < 0.25] <- corr.cols[5]
+  array.cor.colors[array.cor < 0] <- corr.cols[4]
+  array.cor.colors[array.cor < -0.25] <- corr.cols[3]
+  array.cor.colors[array.cor < -0.5] <- corr.cols[2]
+  array.cor.colors[array.cor < -0.75] <- corr.cols[1]
+
+  par(mar=c(5,4,4,4.5))
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  title("Correlation between S4 and ERA-Interim frequencies", cex=1.5)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5)
+
+  for(p in 1:12){
+    for(l in 0:6){
+      polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first (left) triangle
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2])
+      polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3])
+      polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4])
+    }
+  }
+
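+  # Each (startdate, lead month) cell of the plot above is tiled with four
+  # triangles (left, bottom, right, top), one per regime (left: NAO+,
+  # bottom: Atl.Ridge, right: NAO-, top: Blocking). A minimal standalone
+  # sketch of the same geometry, with a generic cell centre (x,y) and a
+  # hypothetical 4-color vector 'cols' (illustration only, not used here):
+  # draw.cell <- function(x, y, cols){
+  #   polygon(c(x-0.5, x-0.5, x), c(y-0.5, y+0.5, y), col=cols[1])  # left
+  #   polygon(c(x-0.5, x, x+0.5), c(y-0.5, y, y-0.5), col=cols[2])  # bottom
+  #   polygon(c(x, x+0.5, x+0.5), c(y, y+0.5, y-0.5), col=cols[3])  # right
+  #   polygon(c(x-0.5, x, x+0.5), c(y+0.5, y, y+0.5), col=cols[4])  # top
+  # }
+  # plot(1, 1, type="n", xlim=c(0,2), ylim=c(0,2)); draw.cell(1, 1, c("red","gold","blue","green"))
+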
par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Delta freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < 10] <- pers.cols[7] + array.pers.colors[array.pers < 5] <- pers.cols[6] + array.pers.colors[array.pers < 2] <- pers.cols[5] + array.pers.colors[array.pers < 0] <- pers.cols[4] + array.pers.colors[array.pers < -2] <- pers.cols[3] + array.pers.colors[array.pers < -5] <- pers.cols[2] + array.pers.colors[array.pers < -10] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Persistence difference (in days/month) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + 
polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + + # FairRPSS summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && period == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
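+
+    # The 'imp.all' double loop below marks each grid point with k + 0.5, where
+    # k (1..4) is the index of the regime with the largest absolute impact at
+    # that point; with brks=1:5 the value k + 0.5 falls into the k-th bin, so
+    # PlotEquiMap paints the point with col.regimes[k]. A vectorised sketch of
+    # the same idea, assuming the 'abind' package is available (an assumption,
+    # not a dependency of this script):
+    # imp.stack <- abind::abind(imp1, imp2, imp3, imp4, along=3)
+    # imp.all <- apply(abs(imp.stack), c(1,2), which.max) + 0.5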
+
+    # position of the longitude values of Europe only (without the Atlantic Ocean and America):
+    #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+    lon.max = 45
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+
+    cluster1 <- which(orden == cluster1.name.period[p]) # in the future it will be == cluster1.name
+    cluster2 <- which(orden == cluster2.name.period[p])
+    cluster3 <- which(orden == cluster3.name.period[p])
+    cluster4 <- which(orden == cluster4.name.period[p])
+
+    assign(paste0("imp",cluster1), varPeriodAnom1mean)
+    assign(paste0("imp",cluster2), varPeriodAnom2mean)
+    assign(paste0("imp",cluster3), varPeriodAnom3mean)
+    assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+    imp.all <- imp1
+    for(i in 1:dim(imp1)[1]){
+      for(j in 1:dim(imp1)[2]){
+        if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+        if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+        if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+        if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+      }
+    }
+
+    if(p<=3) yMod <- 0.7
+    if(p>=4 && p<=6) yMod <- 0.5
+    if(p>=7 && p<=9) yMod <- 0.3
+    if(p>=10) yMod <- 0.1
+    if(p==13 || p==14) yMod <- 0.5
+    if(p==15 || p==16) yMod <- 0.1
+
+    if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+    if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+    if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+    if(p==13 || p==15) xMod <- 0.05
+    if(p==14 || p==16) xMod <- 0.50
+
+    # impact map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+    if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+    PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+    # title map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+    if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE)
+    mtext(my.period[p], font=2, cex=.9)
+
+  } # close 'p' on 'period'
+
+  par(fig=c(0.1,0.9, 0, 0.12), new=TRUE)
+  ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+  par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE)
+  mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+  dev.off()
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_maps_v9.R b/old/weather_regimes_maps_v9.R
new file mode 100644
index 0000000000000000000000000000000000000000..3eb68821ac90cfcab01b3e1317ab6f949cfa54aa
--- /dev/null
+++ b/old/weather_regimes_maps_v9.R
@@ -0,0 +1,868 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf
+#
+# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: first, with the options 'ordering = F' and 'save.names = T', to create a .pdf or .png
+#              file that the user must open to find visually the order in which the four regimes are stored inside the four clusters. For example, he can find that the
+#              sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+. Subsequently, he has to change 'ordering'
+#              from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file.
+#              The second time, you run this script only to get the maps in the right order, which by default is, from top to bottom:
+#              "NAO+", "NAO-", "Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+#              is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set
+#              'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order.
+#
+#              If your regimes derive from a forecast system, you have to compute the regimes derived from a reanalysis beforehand. Then, you can associate the reanalysis
+#              to the forecast system, employing it to automatically associate each simulated cluster with one of the
+#              observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" #/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with the '_psl.RData' suffix
+
+rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- forecast.name # choose between loading reanalysis ('rean.name') or forecast ('forecast.name') data
+
+var.num <- 1 # if fields.name = rean.name, choose a variable for the impact maps; 1: sfcWind, 2: tas
+
+period <- 1:12 # choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec; 13-16: Winter, Spring, Summer, Autumn; 17: Year).
+               # You need to have created the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period beforehand.
+lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+                   # (and the variable 'period' becomes the start month)
+
+# Run it twice: first, with 'ordering <- FALSE' and 'save.names <- TRUE', to look only at the cartography and find visually the right regime names of the four clusters; then,
+# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography.
+# After that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order:
+ordering <- T # if TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below)
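+# For example, with orden = c("NAO+","NAO-","Blocking","Atl.Ridge") and
+# cluster3.name = "NAO+", which(orden == cluster3.name) returns 1, so the maps
+# of the third cluster are drawn in the first (top) row of the composition.
+# A minimal sketch of this remapping (the cluster names here are hypothetical,
+# for illustration only):
+# orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+# cluster.names <- c("Blocking","NAO-","NAO+","Atl.Ridge") # as found visually in the first run
+# match(cluster.names, orden) # 3 2 1 4: the row in which each cluster is plotted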
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+
+as.pdf <- F # FALSE: save results as one .png file for each season/month; TRUE: save only 1 .pdf file with all seasons/months
+
+# if fields.name = forecast.name, set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored:
+rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+# Associate the regimes to the four clusters:
+cluster1.name <- "NAO+"
+cluster2.name <- "NAO-"
+cluster4.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+###################################################################################################################################################################
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period; workdir.orig <- workdir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+for(lead.month in lead.months){
+
+if(as.pdf && fields.name == rean.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+if(as.pdf && fields.name == forecast.name) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+
+
+for(p in period){
+  #p <- 1 # for the debug
+  p.orig <- p
+
+  # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'):
+  if(fields.name == rean.name) {
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData"))
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script
+
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+  }
+  if(fields.name == forecast.name){
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data
+  }
+
+  if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+  if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+  if(p != p.orig) p <- p.orig
+
+  if(fields.name == forecast.name){
+
+    # compute cluster persistence:
+    my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector ordered as: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+
+    sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+    i1 <- i2 <- i3 <- i4 <- 1
+
+    # initialize the sequence counters; day 1 itself is counted by the first iteration of the loop below, so it must not be counted here too:
+    if(my.cluster.vector[1] != 1) i1 <- 0
+    if(my.cluster.vector[1] != 2) i2 <- 0
+    if(my.cluster.vector[1] != 3) i3 <- 0
+    if(my.cluster.vector[1] != 4) i4 <- 0
+
+    n.dd <- length(my.cluster.vector)
+    for(d in 1:(n.dd-1)){
+      if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+      if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+      if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+      if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+      if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+      if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+      if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+      if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+    }
+
+    # the loop above stops at day n.dd-1, so the last day still has to be counted:
+    if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+    if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+    if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+    if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+    pers1 <- mean(sequ1[which(sequ1 != 0)])
+    pers2 <- mean(sequ2[which(sequ2 != 0)])
+    pers3 <- mean(sequ3[which(sequ3 != 0)])
+    pers4 <- mean(sequ4[which(sequ4 != 0)])
+
+    # compute correlations between the simulated clusters and the observed regimes' anomalies:
+    cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+    lat.forecast <- lat; lon.forecast <- lon
+
+    if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+    load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+    load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # also load the reanalysis regime names
+
+    wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+    regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+    if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+    if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order of the reanalysis latitudes")}
+    if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+    if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+    cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+    cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+    cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+    cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+    cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+    cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+    cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+    cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+    cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+    cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+    cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+    cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+    cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+    cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+    cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+    cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+    cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+    # associate each simulated cluster to one observed regime:
+    max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+    cluster.corr2 <- cluster.corr
+    cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+    cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+    max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+    cluster.corr3 <- cluster.corr2
+    cluster.corr3[max2[1],] <- -999
+    cluster.corr3[,max2[2]] <- -999
+    max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+    cluster.corr4 <- cluster.corr3
+    cluster.corr4[max3[1],] <- -999
+    cluster.corr4[,max3[2]] <- -999
+    max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+    seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+    seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+    cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+    cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+    cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+    cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+    cluster1.name <- regimes.obs[cluster1.regime]
+    cluster2.name <- regimes.obs[cluster2.regime]
+    cluster3.name <- regimes.obs[cluster3.regime]
+    cluster4.name <- regimes.obs[cluster4.regime]
+
+    # insert here all the manual corrections to the regime assignment, due to misassignments in the automatic selection:
+    #if(p= && lead.month= ) clusterX.name=...
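+
+    # The max1..max4 block above performs a greedy one-to-one matching: it
+    # repeatedly takes the largest entry of the 4x4 correlation matrix and
+    # masks out its row and column with -999. A compact loop equivalent of
+    # the same idea (a sketch for illustration only, not used by the script):
+    # cc <- cluster.corr
+    # assoc <- rep(NA, 4) # assoc[i] = observed regime associated with simulated cluster i
+    # for(k in 1:4){
+    #   m <- which(cc == max(cc), arr.ind=TRUE)[1,]
+    #   assoc[m[1]] <- m[2]
+    #   cc[m[1],] <- -999; cc[,m[2]] <- -999
+    # }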
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig,period)) period <- period.orig
+    if(p.orig != p) p <- p.orig
+
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array)
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(!identical(period.orig, period)) period <- period.orig
+    if(p.orig != p) p <- p.orig
+    if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+
+  } # close 'if' on 'forecast.name'
+
+  if(fields.name == rean.name){
+    if(save.names){
+      save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+    } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis:
+      load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    }
+  }
+
+  # assign to each cluster its regime:
+  if(ordering == FALSE && fields.name == rean.name){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  if(p == 100){
+    assign(paste0("imp",cluster1), varwr1mean)
+    assign(paste0("imp",cluster2), varwr2mean)
+    assign(paste0("imp",cluster3), varwr3mean)
+    assign(paste0("imp",cluster4), varwr4mean)
+
+    assign(paste0("sig",cluster1), pvalue1)
+    assign(paste0("sig",cluster2), pvalue2)
+    assign(paste0("sig",cluster3), pvalue3)
+    assign(paste0("sig",cluster4), pvalue4)
+  }
+
+  if(fields.name == forecast.name){
+    assign(paste0("freMax",cluster1), wr1yMax)
+    assign(paste0("freMax",cluster2), wr2yMax)
+    assign(paste0("freMax",cluster3), wr3yMax)
+    assign(paste0("freMax",cluster4), wr4yMax)
+
+    assign(paste0("freMin",cluster1), wr1yMin)
+    assign(paste0("freMin",cluster2), wr2yMin)
+    assign(paste0("freMin",cluster3), wr3yMin)
+    assign(paste0("freMin",cluster4), wr4yMin)
+
+    cluster1.Obs <- which(orden == regimes.obs[1])
+    cluster2.Obs <- which(orden == regimes.obs[2])
+    cluster3.Obs <- which(orden == regimes.obs[3])
+    cluster4.Obs <- which(orden == regimes.obs[4])
+
+    assign(paste0("freObs",cluster1), wr1y.obs)
+    assign(paste0("freObs",cluster2), wr2y.obs)
+    assign(paste0("freObs",cluster3), wr3y.obs)
+    assign(paste0("freObs",cluster4), wr4y.obs)
+
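+    # Note: the assign(paste0(...)) calls above and below store each cluster's
+    # data under a name whose suffix is the position of its regime in 'orden',
+    # not the cluster number: e.g. if cluster2 == 1, the frequencies wr2y are
+    # saved as 'fre1' and end up in the top frequency panel. They can be
+    # retrieved generically with get(), e.g. get(paste0("fre", 1)).
+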
assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + #assign(paste0("persistObs",cluster1), persObs1) + #assign(paste0("persistObs",cluster2), persObs2) + #assign(paste0("persistObs",cluster3), persObs3) + #assign(paste0("persistObs",cluster4), persObs4) + + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + 
map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(p == 100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + 
PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, 
cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + 
par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + #if(fields.name == forecast.name) save(orden, cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + + + + + +# Summary graphs: +if(fields.name == forecast.name && p == 100){ + array.cor <- array.diff.freq <- array.rpss <- array.pers <- array(NA,c(12,7,4)) # array storing correlations in the format: [ startdate, leadmonth, regime] + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + #cluster1 <- which(orden == cluster1.name) + #cluster2 <- which(orden == cluster2.name) + #cluster3 <- which(orden == cluster3.name) + #cluster4 <- which(orden == cluster4.name) + + array.cor[p,1+l, 1] <- sp.cor1 # NAO+ + array.cor[p,1+l, 3] <- sp.cor2 # NAO- + array.cor[p,1+l, 4] <- sp.cor3 # Blocking + array.cor[p,1+l, 2] <- sp.cor4 # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + 
polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Delta freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < 5] <- pers.cols[7] + array.pers.colors[array.pers < 2] <- pers.cols[6] + array.pers.colors[array.pers < 1] <- pers.cols[5] + array.pers.colors[array.pers < 0] <- pers.cols[4] + array.pers.colors[array.pers < -1] <- pers.cols[3] + array.pers.colors[array.pers < -2] <- pers.cols[2] + array.pers.colors[array.pers < -5] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l 
in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-10,-5,-2,-1,0,1,2,5,10), cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-10,-5,-2,-1,0,1,2,5,10)), my.labels=c(-10,-5,-2,-1,0,1,2,5,10)) + dev.off() + + + ## # FairRPSS summary: + ## png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && p == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
+ + # position of longitude values of Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_maps_v9.R~ b/old/weather_regimes_maps_v9.R~ new file mode 100644 index 0000000000000000000000000000000000000000..a43f4be837f01ad2109acb49a186fcd0cf099759 --- /dev/null +++ b/old/weather_regimes_maps_v9.R~ @@ -0,0 +1,864 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# +# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature) +# +# I/O: the only input file needed is the output of the weather_regimes.R script, which ends with the suffix "_mapdata.RData".
+# Output files are the maps, which the user can generate as .png or as .pdf +# +# Assumptions: If your regimes derive from a reanalysis, this script must be run twice: once, with the option 'ordering = F' and 'save.names = T' to create a .pdf or .png +# file that the user must open to find visually the order by which the four regimes are stored inside the four clusters. For example, the user can find that the +# sequence of the images in the file (from top to bottom) corresponds to: Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, the user has to change 'ordering' +# from F to T and set the four variables 'clusterX.name' (X=1..4) to follow the same order found inside that file. +# The second run of this script only serves to get the maps in the right order, which by default is, from top to bottom: +# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file +# is written to store the order of the four regimes, in case a user needs to visualize these maps again in the future. In that case, the user must set +# 'ordering = TRUE' and 'save.names = FALSE' to be able to visualize the maps in the correct order. +# +# If your regimes derive from a forecast system, you must first compute the regimes derived from a reanalysis. Then, the reanalysis regimes can be employed to automatically associate each simulated cluster with one of the +# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution! +# +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(Kendall) # for the MannKendall test + +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" #/41_ERA-Interim_monthly_1981-2015_LOESS_filter" # working dir with the input files with '_psl.RData' suffix: + +rean.name <- "ERA-Interim" # reanalysis name (if input data comes from a reanalysis) +forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system) + +fields.name <- forecast.name # Choose between loading reanalysis ('rean.name') or forecasts ('forecast.name') + +var.num <- 1 # if fields.name = 'rean.name', choose a variable for the impact maps (1: sfcWind, 2: tas) + +period <- 1:12 # Choose one or more periods to plot from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year). + # You need to have created beforehand the '_mapdata.RData' file with the output of 'weather_regimes.R' for that period. +lead.months <- 0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot + # (and variable 'period' becomes the start month) + +# run it twice: once, with 'ordering <- FALSE' and 'save.names <- TRUE' to look only at the cartography and find visually the right regime names of the four clusters; then, +# insert the regime names in the correct order below and run this script a second time with 'ordering = TRUE' and 'save.names = TRUE', to save the ordered cartography. +# after that, any time you need to run this script, run it with 'ordering = TRUE' and 'save.names = FALSE', so as not to overwrite the file which stores the cluster order: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
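+# As a quick reference, the three typical runs described in the header above are
+# (an illustrative summary only, not additional options):
+#   1st run:     ordering <- FALSE; save.names <- TRUE   # inspect the maps to identify each cluster visually
+#   2nd run:     ordering <- TRUE;  save.names <- TRUE   # after filling the 'clusterX.name' variables below
+#   later runs:  ordering <- TRUE;  save.names <- FALSE  # only redraw the maps in the stored order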
+save.names <- F # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file + +as.pdf <- F # FALSE: save results as one .png file for each season/month, TRUE: save only 1 .pdf file with all seasons/months + +# if fields.name='forecast.name', set the subdir of 'workdir' where the weather regimes computed with a reanalysis are stored: +rean.dir <- "41_ERA-Interim_monthly_1981-2015_LOESS_filter" + +# Associate the regimes to the four clusters: +cluster1.name <- "NAO+" +cluster2.name <- "NAO-" +cluster4.name <- "Blocking" +cluster3.name <- "Atl.Ridge" + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +################################################################################################################################################################### + +if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set") +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +var.name <- c("sfcWind","tas") +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) +var.num.orig <- var.num # loading _psl files, this value can change! +fields.name.orig <- fields.name +period.orig <- period; workdir.orig <- workdir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +for(lead.month in lead.months){ + +if(as.pdf && fields.name == rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)) +if(as.pdf && fields.name == forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in period){ + #p <- 1 # for debugging + p.orig <- p + + # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month'): + if(fields.name == rean.name) { + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(workdir != workdir.orig) workdir <- workdir.orig # restore the original values of this script + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } + if(fields.name == forecast.name){ + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data + } + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + if(fields.name == forecast.name){ + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[1]]) # reduce it to a vector with the order: day1month1member1,
day2month1member1, ..., day1month2member1, day2month2member1, ... + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + lat.forecast <- lat; lon.forecast <- lon + + if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load + load(file=paste0(workdir,"/",rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables + load(paste0(workdir,"/",rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also reanalysis regime names + + wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes + + regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + + if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!") + + if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis ones")} + if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!") + if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!") + + cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs)) + cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + # insert here all the manual corrections to the regime assignment due to misassignment in the automatic selection (a compact sketch of this greedy matching is given below): + #if(p= && lead.month= ) clusterX.name=...
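+ # The matching above is a greedy search: the largest value of the 4x4 correlation
+ # matrix fixes one (cluster, regime) pair, then its row and column are masked with
+ # -999 and the search is repeated. A compact equivalent written as a standalone
+ # helper (an illustrative sketch only, not called anywhere in this script):
+ greedy.regime.match <- function(corr.matrix){
+   assignment <- rep(NA, nrow(corr.matrix)) # assignment[i] = regime index of cluster i
+   for(step in 1:nrow(corr.matrix)){
+     best <- which(corr.matrix == max(corr.matrix), arr.ind=TRUE)[1,]
+     assignment[best[1]] <- best[2]
+     corr.matrix[best[1],] <- -999 # mask the row of the matched cluster
+     corr.matrix[,best[2]] <- -999 # mask the column of the matched regime
+   }
+   assignment # e.g. greedy.regime.match(cluster.corr) gives the regime of each cluster
+ }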
+ + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig,period)) period <- period.orig + if(p.orig != p) p <- p.orig + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) # reload forecast cluster data (for my.cluster.array) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + + } # close for on 'forecast.name' + + if(fields.name == rean.name){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + load(paste0(workdir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + if(ordering == FALSE && fields.name == rean.name){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(period==100){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + if(fields.name == forecast.name){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + cluster1.Obs <- which(orden == regimes.obs[1]) + cluster2.Obs <- which(orden == regimes.obs[2]) + cluster3.Obs <- which(orden == regimes.obs[3]) + cluster4.Obs <- which(orden == regimes.obs[4]) + + assign(paste0("freObs",cluster1), wr1y.obs) + assign(paste0("freObs",cluster2), wr2y.obs) + assign(paste0("freObs",cluster3), wr3y.obs) + assign(paste0("freObs",cluster4), wr4y.obs) + + 
assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + #assign(paste0("persistObs",cluster1), persObs1) + #assign(paste0("persistObs",cluster2), persObs2) + #assign(paste0("persistObs",cluster3), persObs3) + #assign(paste0("persistObs",cluster4), persObs4) + + } + + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + 
map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(period==100){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + 
PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + } else { fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 } + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, 
cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.935, 0.940), new=TRUE) + mtext(paste0(regime1.name, " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.700, 0.705), new=TRUE) + mtext(paste0(regime2.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.465, 0.470), new=TRUE) + mtext(paste0(regime3.name, " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.230, 0.235), new=TRUE) + mtext(paste0(regime4.name, " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.82, 0.83), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.60), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.35, 0.36), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.12, 0.13), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + 
par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + + #if(fields.name == forecast.name) save(orden, cluster1.name, cluster2.name, cluster3.name, cluster4.name, cluster.corr, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + + # fre1, fre2, etc. 
already refer to the regimes listed in the 'orden' vector: + if(fields.name == forecast.name) save(orden, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, pers1, pers2, pers3, pers4, file=paste0(workdir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + +} # close 'p' on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all months/seasons + +} # close 'lead.month' on 'lead.months' + + + + + +# Summary graphs: +if(fields.name == forecast.name && period == 100){ + array.cor <- array.diff.freq <- array.rpss <- array.pers <- array(NA,c(12,7,4)) # array storing correlations in the format: [ startdate, leadmonth, regime] + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + #cluster1 <- which(orden == cluster1.name) + #cluster2 <- which(orden == cluster2.name) + #cluster3 <- which(orden == cluster3.name) + #cluster4 <- which(orden == cluster4.name) + + array.cor[p,1+l, 1] <- sp.cor1 # NAO+ + array.cor[p,1+l, 3] <- sp.cor2 # NAO- + array.cor[p,1+l, 4] <- sp.cor3 # Blocking + array.cor[p,1+l, 2] <- sp.cor4 # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Corr summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_corr.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) +
polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + + + # Delta freq. summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_diff_freq.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + + # Persistence summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_pers.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < 10] <- pers.cols[7] + array.pers.colors[array.pers < 5] <- pers.cols[6] + array.pers.colors[array.pers < 2] <- pers.cols[5] + array.pers.colors[array.pers < 0] <- pers.cols[4] + array.pers.colors[array.pers < -2] <- pers.cols[3] + array.pers.colors[array.pers < -5] <- pers.cols[2] + array.pers.colors[array.pers < -10] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Persistence difference (in days/month) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + 
polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + + + + # FairRPSS summary: + png(filename=paste0(workdir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + dev.off() + +} + + + + + + + +# impact map of the stronger WR: +if(fields.name == rean.name && period == 100){ + + var.num <- 1 # choose a variable (1:sfcWind, 2:tas) + + png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_most_influent.png"), width=500, height=600) + + plot.new() + #par(mfrow=c(4,3)) + col.regimes <- c("indianred1","cyan","khaki","lightpink") + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(workdir,"/", rean.dir,"/",rean.name,"_",var.name[var.num],"_",my.period[p],"_psl.RData")) + load(paste0(workdir,"/", rean.dir,"/",rean.name,"_ClusterNames.RData")) # they should not depend on var.name!!! 
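+ # The nested grid-point loops below select, for every cell, the regime with the
+ # largest absolute impact; the same idea can be written in vectorized form
+ # (an illustrative sketch only, not called by this script; it assumes the four
+ # impact maps are matrices of identical size, and it breaks ties differently):
+ strongest.regime <- function(imp1, imp2, imp3, imp4){
+   imp.stack <- array(c(abs(imp1), abs(imp2), abs(imp3), abs(imp4)), c(dim(imp1), 4))
+   apply(imp.stack, c(1,2), which.max) + 0.5 # values 1.5..4.5, inside the breaks 1:5 used by PlotEquiMap
+ }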
+ + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.5 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.162, yMod +0.21 + 0.172), new=TRUE) + mtext(my.period[p], font=2, cex=.9) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0, 0.12), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.9, 0.95), new=TRUE) + mtext(paste0("Most influent WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() +} + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v1.R b/old/weather_regimes_v1.R new file mode 100644 index 0000000000000000000000000000000000000000..1a478cb4ebe590cf3fe2e7bb572f4433cbb02b8c --- /dev/null +++ b/old/weather_regimes_v1.R @@ -0,0 +1,773 @@ + +# Creation: 28/6/2016 +# Author: Nicola Cortesi +# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system +# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder. 
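+# Usage (an illustrative note, consistent with the commandArgs() parsing further below):
+# the script can be launched from the bash with 1 or 2 optional arguments, e.g.:
+#
+#   Rscript weather_regimes_v1.R 7      # reanalysis: compute only period 7 (July)
+#   Rscript weather_regimes_v1.R 1 2    # forecast system: start month 1, lead month 2
+#
+# with no arguments, the default values of 'WR.period', 'startM' and 'leadM' set below are used.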
+# Assumption: its output is needed by the script weather_regimes_maps.R +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from a reanalysis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +res <- 0.75 # set the resolution you want to interpolate the psl and var data to when loading it (e.g. set to 0.75 to use the same resolution as ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude! + +var.num <- 1 # Choose a variable.
1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Only for Reanalysis:: +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# only for Seasonal forecasts: +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 1 # select only the data of one lead month: [bypassed by the optional arguments of the script] + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two below variables: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} else { + start.month <- startM + lead.month <- leadM + WR.period <- start.month +} + +# in case the script is run with 1 argument, it is assumed you are using Reanalysis: +if(length(script.arg) == 1) WR.period <- script.arg[1] + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + 
period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only handles the case where the study area crosses the Greenwich meridian!
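+# A minimal sketch of the wrap-around selection above, on a toy 10-point longitude axis
+# (the toy.* names are hypothetical, not used by the script); it shows why pos.lon is built
+# from two index ranges when the box crosses Greenwich:
+## toy.lon <- seq(0, 324, 36)                             # 0, 36, ..., 324 degrees
+## toy.pos.lon.max <- tail(which(45 - toy.lon >= 0), 1)   # easternmost point <= lon.max (index 2)
+## toy.pos.lon.min <- which(toy.lon - 274.5 >= 0)[1]      # westernmost point >= lon.min (index 9)
+## c(1:toy.pos.lon.max, toy.pos.lon.min:length(toy.lon))  # 1 2 9 10: both sides of Greenwich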
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can only load the data for one month at a time (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$mod #, grid=my.grid, method='bilinear')
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # define the startdate sequence and the number of leadtimes BEFORE allocating psleuFull, because its dimensions depend on them (they are recomputed below unchanged):
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  n.leadtimes <- length(leadtime)
+
+  # Load 6-hourly psl data in the forecast case:
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
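+  # A small sketch of the row ordering in psleuFull above (the toy.* names are hypothetical,
+  # not used by the script): with n.years = 3, the rows of startdate s are
+  # (1+(s-1)*3):(s*3), i.e. row = (s-1)*n.years + y:
+  ## toy.n.years <- 3
+  ## toy.row <- function(s, y) (s - 1)*toy.n.years + y
+  ## toy.row(1, 1:3) # rows 1 2 3 -> startdate 1, years 1..3
+  ## toy.row(2, 1:3) # rows 4 5 6 -> startdate 2, years 1..3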
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#p=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + # in this case of S4 we don't need to select anything because we already loaded only 1 month of data! 
+ if(fields.name == ECMWF_S4.name){ + + ## climatology plots: + + ## load eraint data: + ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + ## my.years <- year.start:year.end + ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + ## # check S4 lon and lat: + ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + ## # draw climatologies: + ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + ## s4.data <- data.frame(s4=true.climate[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + ## s4.pred <- predict(s4.loess) + ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## col.monthly <- "brown" + ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess <- loess(era ~ day, era.data, span=0.50) + ## era.pred <- predict(era.loess) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + ## era.data2 <- data.frame(era=era[1,1,1:216], 
day=1:216) + ## era.loess2 <- loess(era ~ day, era.data2, span=0.3) + ## era.pred2 <- predict(era.loess2) + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess3 <- loess(era ~ day, era.data3, span=0.35) + ## era.pred3 <- predict(era.loess3) + + ## col.monthly.erai <- "turquoise3" + ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + + + ## # anomalias: + ## point.type <- "l" + ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,] + ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3) + ## lines(rep(0,216)) + + ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d + ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred + ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4)) + ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim + ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3) + + + ## pos.last.year <- dim(var$obs)[3] + ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216] + ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216] + ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216] + ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2) + + ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216] + ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216] + + ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4)) + ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim + ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2) + + ## # Nube plot: + ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## grid(nx=10,lwd=2) + ## lines(era.anom.pred2,type="l", 
col="purple", lty=1, pch=19, lwd=2) + ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2) + ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2) + + + ## # fit alfa parameter for each grid point: + ## i=1 + ## j=1 + ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T) + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## k=0; error <- c() + ## for (alfa in seq(0.25,0.50,0.01)){ + ## k=k+1 + ## era.loess <- loess(era ~ day, era.data, span=alfa) + ## era.pred <- predict(era.loess) + ## error[k] <- mean(abs(era.pred - era[1,1,1:216])) + ## } + + # convert psl in daily anomalies with the LOESS filter: + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. 
of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + + ## # if you didn't convert psl data in anomalies until now, you can convert psl data in monthly anomalies: + ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T) + ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members) + + ## pslPeriod <- pslPeriod - pslPeriodClim2 + ## rm(pslPeriodClim, pslPeriodClim2) + ## gc() + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() +} + + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +## if(fields.name == ECMWF_S4.name) { +## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + +## my.years <- year.start:year.end +## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear') +## } + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + 
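+# A self-contained sketch of the PCA + k-means step used in the cluster analysis above,
+# on synthetic data (sizes and names are illustrative, not the script's psl matrix):
+## set.seed(1)
+## X <- matrix(rnorm(360*50), nrow=360, ncol=50)  # 360 "days" x 50 "grid points"
+## pca <- princomp(X, cor=FALSE)
+## expl <- cumsum(pca$sdev^2/sum(pca$sdev^2))     # cumulative explained variance
+## npc <- which(expl > variance.explained)[1]     # first number of PCs above the 80% threshold
+## cl <- kmeans(pca$scores[, 1:npc], centers=4, iter.max=100, nstart=30)
+## table(cl$cluster)                              # size of each of the 4 clusters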
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- 
apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- 
wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, psl, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, p, WR.period, lon, lat, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    # yearly frequency of each regime; the days of year y sit at positions y-year.start+1, y-year.start+1+n.years, ... (interleaved ordering):
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+} # close the for loop on 'p'
+
+
+
+
+
diff --git a/old/weather_regimes_v10.R b/old/weather_regimes_v10.R
new file mode 100644
index 0000000000000000000000000000000000000000..7905bb91ab92a4e2f8b43b425b391bd66deb9229
--- /dev/null
+++ b/old/weather_regimes_v10.R
@@ -0,0 +1,453 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+ERAint.name <- "ERA-Interim"
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps
+
+rean <- ERAint # choose one of the two above reanalyses from which to load the input psl and var data
+rean.name <- ERAint.name
+
+psl <- "g500" # pressure variable to use from the chosen reanalysis
+
+var.name <- "sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- "Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- "m/s" #"ºC" #"m/s" # unit of measure (for drawing color scales)
+
+year.start <- 1979
+year.end <- 2013
+
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitude!
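+# Note: before clustering, this script weights each grid point by sqrt(cos(latitude)), so that
+# high-latitude points (which cover a smaller area) contribute less to the cluster distances;
+# a minimal numeric check (toy latitude values, hypothetical):
+## cos(c(30, 50, 70)*pi/180)^0.5 # ~0.93 0.80 0.58: weights shrink toward the pole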
+ +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study region cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon of chosen area only +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# compute the PCs: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=1000) # 4 is the number of clusters, 7 the number of EOFs which explains ~80% of variance + + rm(pslcut, pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim2) + gc() + + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + + rm(wrdiff, sequ) + gc() + + #wr1 <- which(my.cluster[[period]]$cluster==1) + #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- 
which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + period.length <- n.days.in.a.period(period,1999) # using year 1999 introduce a small error in winter season because of bisestile years + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + #wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + #wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + #wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + #wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- 
apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # breaks and colors of the geopotential fields: + #my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + #regime1.name <- "Cluster #1" + #regime2.name <- "Cluster #2" + #regime3.name <- "Cluster #3" + #regime4.name <- "Cluster #4" + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE +as.pdf <- FALSE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atlantic Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(as.pdf)(pdf(file=paste0(mapdir,"/",rean.name,"_",var.name,".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atlantic Ridge" + } + if(period == 14){ + cluster1.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atlantic Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster2.name="Atlantic Ridge" + } + if(period == 16){ + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atlantic Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == 
cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). Source:",rean.name), cex.main=9, line=-7) + EU <- c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + + PlotEquiMap(map1, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1),brks2=my.brks2, drawleg=F) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05)) + plot.new(); plot.new(); plot.new() # plot 2 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map2), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map3), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(map4), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, 
freq.max=60)
+  plot.new(); title("Year", cex.main=2, line=-3)
+
+  # Titles:
+  plot.new(); title(paste(regime1.name, "geopotential height"), cex.main=3.3, line=-4)
+  plot.new(); title(paste(regime1.name, "impact on", var.name.full), cex.main=3.3, line=-4)
+  plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4)
+  plot.new(); title(paste(regime2.name, "geopotential height"), cex.main=3.3, line=-4)
+  plot.new(); title(paste(regime2.name, "impact on", var.name.full), cex.main=3.3, line=-4)
+  plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4)
+  plot.new(); title(paste(regime3.name, "geopotential height"), cex.main=3.3, line=-4)
+  plot.new(); title(paste(regime3.name, "impact on", var.name.full), cex.main=3.3, line=-4)
+  plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4)
+  plot.new(); title(paste(regime4.name, "geopotential height"), cex.main=3.3, line=-4)
+  plot.new(); title(paste(regime4.name, "impact on", var.name.full), cex.main=3.3, line=-4)
+  plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4)
+
+  # Legends:
+  par(fig=c(0.04,0.3,0.718,0.744), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.718,0.743), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.483,0.508), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.484,0.509), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.248,0.273), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.248,0.273), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.013,0.038), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.013,0.038), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit), cex=2.5)
+
+  if(!as.pdf) dev.off() # for saving 4 PNGs
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two above reanalyses from where to load the input psl and var data +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis + +var.name <- "sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- "Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- "m/s" #"ºC" #"m/s" # unit of measure (for drawing color scales) + +year.start <- 1979 +year.end <- 2013 + +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 70 +lat.min <- 30 +lon.max <- 40 +lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitude! + +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian!
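+# Added note (not in the original script): a minimal, self-contained check of the
+# wrap-around indexing above, using made-up values. On a hypothetical 1-degree grid
+# with lon = 0..359, lon.max = 40 and lon.min = 280 give pos.lon.max = 41 and
+# pos.lon.min = 281, so pos.lon = c(1:41, 281:360) selects the box crossing Greenwich.
+# Wrapped in if(FALSE) so it is never executed together with the script:
+if(FALSE){
+  lon.toy <- 0:359                        # hypothetical regular 1-degree grid
+  pos.toy <- c(1:which.min(abs(lon.toy - 40)), which.min(abs(lon.toy - 280)):length(lon.toy))
+  stopifnot(length(pos.toy) == 121)       # 41 points east + 80 points west of Greenwich
+  stopifnot(lon.toy[pos.toy[42]] == 280)  # first index after the jump is lon.min
+}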
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon of chosen area only +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# compute the PCs: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl.
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], 4) # 4 is the number of clusters; n.pcs[period] is the number of EOFs that explain ~80% of the variance + + rm(pslcut, pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim2) + gc() + + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 5 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + + rm(wrdiff, sequ) + gc() + + #wr1 <- which(my.cluster[[period]]$cluster==1) + #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4
<- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + #wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + #wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + #wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + #wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <-
apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # breaks and colors of the geopotential fields: + #my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + #regime1.name <- "Cluster #1" + #regime2.name <- "Cluster #2" + #regime3.name <- "Cluster #3" + #regime4.name <- "Cluster #4" + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- FALSE +as.pdf <- TRUE # choose if you prefer to save results as 1 .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atlantic Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(as.pdf)(pdf(file=paste0(mapdir,"/",rean.name,"_",var.name,".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster1.name="NAO-" + cluster2.name="Atlantic Ridge" + cluster4.name="NAO+" + cluster3.name="Blocking" + } + if(period == 14){ + cluster3.name="Atlantic Ridge" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster4.name="NAO+" + } + if(period == 15){ + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atlantic Ridge" + } + if(period == 16){ + cluster3.name="Atlantic Ridge" + cluster4.name="Blocking" + cluster1.name="NAO-" + cluster2.name="NAO+" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + 
cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). Source:",rean.name), cex.main=9, line=-7) + EU <- c(1:54,130:161) # position of longitude values of Europe only (without the Atlantic Ocean and America) + + PlotEquiMap(map1, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1),brks2=my.brks2, drawleg=F) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05)) + plot.new(); plot.new(); plot.new() # plot 3 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map2), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map3), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(map4), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) +
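+# Added note (not in the original script): the assign(paste0("map",clusterN), ...)
+# calls above implement a simple permutation: k-means cluster N is drawn at the row
+# where its visually-identified regime name sits in 'orden'. A minimal sketch with
+# hypothetical names, never executed here, showing that which(orden == name) is just
+# match(name, orden):
+if(FALSE){
+  orden.toy <- c("NAO+","NAO-","Blocking","Atlantic Ridge")
+  names.toy <- c("Blocking","NAO-","Atlantic Ridge","NAO+")  # names of clusters 1:4
+  match(names.toy, orden.toy)  # returns 3 2 4 1: the display row of each cluster
+}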
+ # Titles: + plot.new(); title(paste(regime1.name, "geopotential height"), cex.main=3.3, line=-4) + plot.new(); title(paste(regime1.name, "impact on", var.name.full), cex.main=3.3, line=-4) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4) + plot.new(); title(paste(regime2.name, "geopotential height"), cex.main=3.3, line=-4) + plot.new(); title(paste(regime2.name, "impact on", var.name.full), cex.main=3.3, line=-4) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4) + plot.new(); title(paste(regime3.name, "geopotential height"), cex.main=3.3, line=-4) + plot.new(); title(paste(regime3.name, "impact on", var.name.full), cex.main=3.3, line=-4) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4) + plot.new(); title(paste(regime4.name, "geopotential height"), cex.main=3.3, line=-4) + plot.new(); title(paste(regime4.name, "impact on", var.name.full), cex.main=3.3, line=-4) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4) + + # Legends: + par(fig=c(0.04,0.3,0.718,0.744), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.718,0.743), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2.5) + + par(fig=c(0.04,0.3,0.483,0.508), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.484,0.509), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2.5) + + par(fig=c(0.04,0.3,0.248,0.273), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.248,0.273), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2.5) + + par(fig=c(0.04,0.3,0.013,0.038), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.013,0.038), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2.5) + + if(!as.pdf) dev.off() # for saving 4 png + + + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v11.R b/old/weather_regimes_v11.R new file mode 100644 index 0000000000000000000000000000000000000000..565a869026a27d363ebad54df6cf2081eba77634 --- /dev/null +++ b/old/weather_regimes_v11.R @@ -0,0 +1,476 @@ + +library(s2dverification) # for the function Load() +library(abind) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = 
'/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +rean <- ERAint # choose one of the two above reanalyses from where to load the input psl and var data +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure (for drawing color scales) + +year.start <- 1994 #1979 +year.end <- 2013 + +PCA <- TRUE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 80 #70 +lat.min <- 20 #30 +lon.max <- 30 #40 +lon.min <- 270 #280 # put a positive number here because the geopotential has only positive values of longitude! + +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian!
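+# Added suggestion (not in the original script): two defensive checks that fail fast
+# if the requested box does not actually cross the Greenwich meridian, which is the
+# only case the pos.lon construction above handles; kept inside if(FALSE) so the
+# script behaviour is unchanged:
+if(FALSE){
+  stopifnot(lon.max < lon.min)          # in the 0-360 convention the box must wrap around 0 degrees
+  stopifnot(pos.lon.max < pos.lon.min)  # otherwise 1:pos.lon.max and pos.lon.min:n.lon overlap
+}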
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon of chosen area only +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") +rm(domain) + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl.
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=20) # 4 is the number of clusters + rm(pslcut) + } else { + my.cluster[[period]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=20) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 5 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + + rm(wrdiff, sequ) + gc() + + #wr1 <- which(my.cluster[[period]]$cluster==1)
+ #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + #wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + #wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + #wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + #wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4) + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <-
pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # breaks and colors of the geopotential fields: + #my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE +as.pdf <- FALSE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl. Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(as.pdf)(pdf(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl. Ridge" + } + if(period == 14){ + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl. Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster2.name="Atl. Ridge" + } + if(period == 16){ + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl. 
Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",rean.name), cex.main=9, line=-7) + EU <- c(1:41, 130:161) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + + PlotEquiMap(map1, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1),brks2=my.brks2, drawleg=F) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05)) + plot.new(); plot.new(); plot.new() # plot 2 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map2), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map3), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(map4), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, "geopotential height"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "geopotential height"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "geopotential height"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime4.name, "geopotential height"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4.5) + + # Legends: + par(fig=c(0.04,0.3,0.718,0.744), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.718,0.743), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, 
subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.483,0.508), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.484,0.509), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.248,0.273), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.248,0.273), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.013,0.038), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.013,0.038), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.66,0.67,0.83,0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.60,0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.37,0.38), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.13,0.14), new=TRUE) + mtext("%", cex=3.3) + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v11.R~ b/old/weather_regimes_v11.R~ new file mode 100644 index 0000000000000000000000000000000000000000..496e0350b1bd1b946fd637b1a79385ed7ca37512 --- /dev/null +++ b/old/weather_regimes_v11.R~ @@ -0,0 +1,477 @@ + +library(s2dverification) # for the function Load() +library(abind) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +#mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two above reanalysis from where to load the input psl and var data +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis + +var.num <- 1 # Choose a variable. 
1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure (for drawing color scales) + +year.start <- 1979 +year.end <- 2013 + +PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 80 #70 +lat.min <- 20 #30 +lon.max <- 30 #40 +lon.min <- 270 #280 # put a positive number here because the geopotential has only positive values of longitude! + +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian!
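+# Added note (not in the original script): the four which(abs(...)==min(abs(...)))
+# lines above are the usual nearest-gridpoint lookup; a hypothetical helper doing the
+# same (returning the first index in case of ties), never executed here, could make
+# that intent explicit and be reused for other boxes:
+if(FALSE){
+  nearest.index <- function(grid, value) which.min(abs(grid - value))
+  # e.g.: pos.lat.max <- nearest.index(domain$lat, lat.max)
+}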
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon of chosen area only +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") +rm(domain) + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl.
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=20) # 4 is the number of clusters + rm(pslcut) + } else { + my.cluster[[period]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=20) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 5 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + + rm(wrdiff, sequ) + gc() + + #wr1 <- which(my.cluster[[period]]$cluster==1)
+ #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + #wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + #wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + #wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + #wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4) + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <-
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # breaks and colors of the geopotential fields:
+  #my.brks <- c(48000, seq(48501,57100,1), 60000) # mean anomaly of a WR
+  #my.brks2 <- c(48000, seq(48500,58000,500)) # mean anomaly of a WR
+  my.brks <- c(-300,-199:200,300) # mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR, for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+
+  # breaks and colors of the impact maps:
+  my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # mean anomaly of a WR
+  #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # mean anomaly of a WR
+
+  #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+  my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+  # save all the data necessary to redraw the graphs once we know the right regime of each cluster:
+  save(workdir,rean.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean, varPeriodAnom4mean, wr1y,wr2y,wr3y,wr4y, pvalue1,pvalue2,pvalue3,pvalue4, file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close the for loop on 'period'
+
+
+# Run it twice: the first time with ordering <- FALSE, to look at the cartography and identify visually the right regime names of the four clusters;
+# the second time with ordering <- TRUE, to save the ordered cartography:
+ordering <- FALSE
+as.pdf <- TRUE # choose whether to save the results as one .png file per season or as a single .pdf with all seasons
+
+# choose the order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl. Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(as.pdf) pdf(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],".pdf"), width=40, height=60)
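+# --- Illustrative aside (editor's sketch, not used by the script) ---
+# The per-season cluster naming done by hand below could in principle be
+# automated by pattern-correlating each cluster composite with reference
+# regime composites; ref.maps (a named list of lat x lon matrices) is a
+# hypothetical input that the user would have to supply:
+assign.regime <- function(composite, ref.maps){
+  r <- sapply(ref.maps, function(ref) cor(as.vector(composite), as.vector(ref)))
+  names(which.max(r)) # name of the best-correlated reference regime
+}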
Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",rean.name), cex.main=9, line=-7) + EU <- c(1:41, 130:161) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + + PlotEquiMap(map1, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1),brks2=my.brks2, drawleg=F) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05)) + plot.new(); plot.new(); plot.new() # plot 2 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map2), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map3), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(map4), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, "geopotential height"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime2.name, "geopotential height"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime3.name, "geopotential height"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "geopotential height"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-5) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-5) + + # Legends: + par(fig=c(0.04,0.3,0.718,0.744), new=TRUE) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.718,0.743), new=TRUE) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, 
+  # Legends:
+  par(fig=c(0.04,0.3,0.718,0.744), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.718,0.743), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.483,0.508), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.484,0.509), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.248,0.273), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.248,0.273), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.013,0.038), new=TRUE)
+  ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=2.5)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.013,0.038), new=TRUE)
+  ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.66,0.67,0.83,0.84), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.60,0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.37,0.38), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.13,0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  if(!as.pdf) dev.off() # close the device when saving one .png per season
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # close the device when saving a single .pdf with all seasons
diff --git a/old/weather_regimes_v12.R b/old/weather_regimes_v12.R
new file mode 100644
index 0000000000000000000000000000000000000000..f941e46dfd2d7421e8bbac64f6a6828275e3dd0f
--- /dev/null
+++ b/old/weather_regimes_v12.R
@@ -0,0 +1,564 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint # choose one of the two reanalyses above from which to load the input psl data
+rean.name <- ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- forecast # set to 'rean' to load pressure fields and var from reanalysis, or to 'forecast' to load them from forecast systems
+fields.name <- forecast.name
+
+year.start <- 1994 #1979 #1981 #1994
+year.end <- 2013 #2010
+
+leadtime <- 1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- ECMWF_monthly.name # ERAint.name
+
+var.num <- 1 # choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable, used in the titles of the graphs
+var.unit <- c("m/s", "ºC") # units of measure (for drawing the color scales)
+
+sequences=FALSE # set to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime
+
+PCA <- FALSE # set to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that must be explained by the retained PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # use a positive number here because the geopotential grid has only positive longitude values!
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(abs(domain$lat-lat.max) == min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min) == min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max) == min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min) == min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: this only covers the case where the study area crosses the Greenwich meridian!
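+# --- Illustrative aside (editor's sketch, not used by the script) ---
+# A generalisation of the Greenwich-crossing index built above: pick the
+# longitude columns of a box that may or may not wrap around 0/360 degrees;
+# the helper name lon.box.index is hypothetical:
+lon.box.index <- function(lon, lon.min, lon.max){
+  if(lon.min <= lon.max) which(lon >= lon.min & lon <= lon.max)
+  else c(which(lon >= lon.min), which(lon <= lon.max)) # box wraps around Greenwich
+}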
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded with the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area (same as lat.min etc., but snapped to actual values of the lat/lon vectors)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # load 6-hourly psl data in the forecast case:
+  sdates.seq <- weekly.seq(forecast.year,mes,day)
+  psleuFull <- array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates.seq)){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8)), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) # average the four 6-hourly fields into one daily field
+    psleuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1] # remove the leading NA that was introduced only to be able to execute the command above
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    days.period <- c()
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+  }
+
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only the days in the chosen period (e.g. winter)
+
+  # weight the pressure fields based on latitude:
+  lat.weighted <- cos(lat*pi/180)^0.5
+  lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+  pslPeriod.weighted <- pslPeriod * lat.weighted.array
+  rm(lat.weighted.array)
+  gc()
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # reshape the array into a days x gridpoints matrix
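+  # --- Illustrative aside (editor's note, not used by the script) ---
+  # Why the 0.5 exponent above: k-means works on squared Euclidean distances,
+  # so weighting each grid point by w_i = sqrt(cos(lat_i)) makes the squared
+  # distance between two weighted days area-weighted:
+  #   sum_i (w_i*x_i - w_i*y_i)^2 = sum_i cos(lat_i)*(x_i - y_i)^2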
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of 9, to speed up the PCA
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # cumulative explained variance, used to decide how many PCs to keep
+    n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # keep only the first PCs that together explain at least 80% of the variance
+    my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else {
+    my.cluster[[period]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
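+# --- Illustrative aside (editor's sketch, not used by the script) ---
+# The PC selection above, in isolation (X is a hypothetical days x gridpoints matrix):
+# pca <- princomp(X, cor = FALSE)
+# cum <- cumsum(pca$sdev^2) / sum(pca$sdev^2) # cumulative explained variance
+# npc <- which(cum > variance.explained)[1]   # first number of PCs crossing the threshold
+# scores <- pca$scores[, 1:npc]               # reduced input for kmeans()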
+# Load var data:
+vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area)
+  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+# for loading forecast systems:
+### load 1 file once to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1)
+
+#my.grid <- paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  days.period <- NA
+  for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+  days.period <- days.period[-1] # remove the leading NA that was introduced only to be able to execute the command above
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period)
+    for(i in 5:(n.days.period-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else {
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+    wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+    wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+    wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+    wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
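+  # --- Illustrative aside (editor's sketch, not used by the script) ---
+  # The four significance maps computed just below each abind() the whole
+  # anomaly array to a regime subset before apply(). A memory-friendlier
+  # helper running the same point-by-point Welch t-test; pvalue.map is a
+  # hypothetical name:
+  pvalue.map <- function(anom, idx){
+    test.point <- function(k, l) t.test(anom[, k, l], anom[idx, k, l])$p.value
+    outer(seq_len(dim(anom)[2]), seq_len(dim(anom)[3]), Vectorize(test.point))
+  }
+  # e.g.: pvalue1 <- pvalue.map(varPeriodAnom, wr1)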
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth1, varPeriodAnom1)
+  gc()
+
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth2, varPeriodAnom2)
+  gc()
+
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth3, varPeriodAnom3)
+  gc()
+
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+  rm(varPeriodAnom)
+  gc()
+
+  pslPeriod <- psleuFull[days.period,,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # save all the data necessary to redraw the graphs once we know the right regime of each cluster:
+  save(workdir,rean.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean, varPeriodAnom4mean, wr1y,wr2y,wr3y,wr4y, pvalue1,pvalue2,pvalue3,pvalue4, file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close the for loop on 'period'
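+# --- Illustrative aside (editor's note) ---
+# Each *_mapdata.RData file written above is self-contained; one season can
+# be reloaded later for redrawing without rerunning the analysis, e.g.:
+# load(paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[13],"_mapdata.RData")) # winter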
Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +# breaks and colors of the geopotential fields: + +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(-2000,seq(-1400,1400, 200), 2000) # % Mean anomaly of a WR + my.brks2 <- c(-2000,seq(-1400,1400,200), 2000) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-1000,-500,0,500,1000) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +my.brks.var <- c(-20,seq(-3,3,0.2),20) # % Mean anomaly of a WR + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + +if(as.pdf)(pdf(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl. Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl. Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl. Ridge" + } + if(period == 16){ + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster1.name="Atl. 
Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",rean.name), cex.main=9, line=-7) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + PlotEquiMap(map1, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1), brks2=my.brks2, drawleg=F) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05)) + plot.new(); plot.new(); plot.new() # plot 2 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(map4), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime4.name, psl.name, "Anomaly"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4.5) + + # Legends: + + if(var.name[var.num] == "sfcWind") 
+  # Legends:
+
+  if(var.name[var.num] == "sfcWind") values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+  if(var.name[var.num] == "tas") values.to.plot2 <- c(-10,-6,-3,0,3,6,10)
+
+  my.subset <- match(values.to.plot, my.brks) # positions of the labelled values in the breaks vector
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(0.04, 0.302, 0.717, 0.744), new=TRUE) # syntax: fig=c(xmin, xmax, ymin, ymax)
+  #ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5)
+  #ColorBar2(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.718,0.743), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2)
+  #ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.483,0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.484,0.509), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset)
+  mtext(side=4," m", cex=2.5)
+  par(fig=c(0.355,0.605,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.66,0.67,0.83,0.84), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.60,0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.37,0.38), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.13,0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  if(!as.pdf) dev.off() # close the device when saving one .png per season
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # close the device when saving a single .pdf with all seasons
values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + if(var.name[var.num] == "tas") values.to.plot2 <- c(-10,-6,-3,0,3,6,10) + + my.subset <- match(values.to.plot, my.brks) + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(0.04, 0.302, 0.717, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5) + #ColorBar2(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.718,0.743), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + #ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.483,0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.484,0.509), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.248,0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.248,0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.013,0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.013,0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.66,0.67,0.83,0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.60,0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.37,0.38), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.13,0.14), new=TRUE) + mtext("%", cex=3.3) + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v13.R b/old/weather_regimes_v13.R new file mode 100644 index 0000000000000000000000000000000000000000..193de74752767b3d0f40eac8ab273be5778b11c2 --- /dev/null +++ b/old/weather_regimes_v13.R @@ -0,0 +1,588 @@ + +library(s2dverification) # for the function Load() +library(abind) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint.name <- "ERA-Interim" + +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- JRA55 # choose one of the two above reanalysis from where to load the input psl 
data
+rean.name <- JRA55.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- forecast.name
+
+year.start <- 1994 #1979 #1981 #1994
+year.end <- 2013 #2010
+
+leadtime <- 1 # if fields=forecast, set the forecast time (in days) at which to compute the weather regimes
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- ECMWF_monthly.name # ERAint.name
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure (for drawing color scales)
+
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days of the same regime
+
+PCA <- FALSE # set to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
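+
+# The box above crosses the Greenwich meridian (lon.min > lon.max in the 0-360
+# convention), which is the only case the longitude selection below handles;
+# a minimal sanity check one could add here (a sketch, not in the original script):
+if(lon.min <= lon.max) warning("study box does not cross Greenwich: the pos.lon selection below assumes lon.min > lon.max")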
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+# load only 1 day of pressure data to detect the latitude and longitude values that bound the North Atlantic study area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but corresponding to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly psl data in the forecast case:
+  sdates.seq <- weekly.seq(forecast.year,mes,day)
+  psleuFull <- array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates.seq)){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) # average the four 6-hourly steps of the chosen leadtime into a daily mean
+    psleuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
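+
+  # note on the weighting applied below: sqrt(cos(lat)) is the usual equal-area
+  # factor; since kmeans works on squared Euclidean distances, after this scaling
+  # each grid point contributes proportionally to cos(lat), i.e. to the area it represents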
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1] # remove the initial NA, introduced only to be able to execute the above command
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    days.period <- c()
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+  }
+
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only the days in the chosen period (e.g. winter)
+
+  # weight the pressure fields based on latitude:
+  lat.weighted <- cos(lat*pi/180)^0.5
+  lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+  pslPeriod.weighted <- pslPeriod * lat.weighted.array
+  rm(lat.weighted.array)
+  gc()
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix (one row per day)
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # cumulative explained variance, to check how many PCs to keep
+    n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # keep the first PCs that together explain at least the chosen fraction of variance
+    my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else {
+    my.cluster[[period]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly var data in the forecast case:
+  sdates.seq <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates.seq)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+}
+
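+
+# Quick sanity check (a sketch, not part of the original workflow): Load() should
+# have filled every day of vareuFull; leftover NAs would be silently hidden by
+# the na.rm=T calls in the composites and t-tests below.
+if(any(is.na(vareuFull))) warning(paste0("vareuFull contains ", sum(is.na(vareuFull)), " NA values after loading"))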
+# for loading forecasting systems:
+
+### load once 1 file to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed:
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1)
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  days.period <- NA
+  for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+  days.period <- days.period[-1] # remove the initial NA, introduced only to be able to execute the above command
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only the var data of the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period)
+    for(i in 5:(n.days.period-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # daily cluster series, with days that don't belong to sequences of 5 or more days set to 0:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else{
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
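+
+  # A more compact, equivalent way to flag days inside runs of >= 5 identical
+  # cluster labels is run-length encoding; sketch only (the loop above also
+  # forces the edges of the series to FALSE, so results can differ there):
+  #runs <- rle(as.vector(my.cluster[[period]]$cluster))
+  #cluster.sequence.alt <- my.cluster[[period]]$cluster * rep(runs$lengths >= 5, runs$lengths)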
+
+  #wr1 <- which(my.cluster[[period]]$cluster==1)
+  #wr2 <- which(my.cluster[[period]]$cluster==2)
+  #wr3 <- which(my.cluster[[period]]$cluster==3)
+  #wr4 <- which(my.cluster[[period]]$cluster==4)
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+    #wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1))
+    #wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2))
+    #wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3))
+    #wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4))
+    wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+    wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+    wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+    wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert counts to relative frequencies (multiplied by 100 only at plot time):
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth1, varPeriodAnom1)
+  gc()
+
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth2, varPeriodAnom2)
+  gc()
+
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth3, varPeriodAnom3)
+  gc()
+
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+  rm(varPeriodAnom)
+  gc()
+
+  pslPeriod <- psleuFull[days.period,,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # save all the data necessary to redraw the graphs when we know the right regime:
+ 
save(workdir,rean.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE +as.pdf <- FALSE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl. Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +# breaks and colors of the geopotential fields: + +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(-2000,seq(-1400,1400, 200), 2000) # % Mean anomaly of a WR + my.brks2 <- c(-2000,seq(-1400,1400,200), 2000) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-1000,-500,0,500,1000) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +my.brks.var <- c(-20,seq(-3,3,0.2),20) # % Mean anomaly of a WR + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + +if(as.pdf)(pdf(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl. Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl. Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl. Ridge" + } + if(period == 16){ + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster1.name="Atl. 
Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",rean.name), cex.main=9, line=-7) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + PlotEquiMap(map1, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1), brks2=my.brks2, drawleg=F) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05)) + plot.new(); plot.new(); plot.new() # plot 2 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(map4), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime4.name, psl.name, "Anomaly"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4.5) + + # Legends: + + if(var.name[var.num] == "sfcWind") 
values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + if(var.name[var.num] == "tas") values.to.plot2 <- c(-10,-6,-3,0,3,6,10) + + my.subset <- match(values.to.plot, my.brks) + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(0.04, 0.302, 0.717, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5) + #ColorBar2(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.718,0.743), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + #ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.483,0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.484,0.509), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.248,0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.248,0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.013,0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.013,0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.66,0.67,0.83,0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.60,0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.37,0.38), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.13,0.14), new=TRUE) + mtext("%", cex=3.3) + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v13.R~ b/old/weather_regimes_v13.R~ new file mode 100644 index 0000000000000000000000000000000000000000..2d674c2b08b84f1248b38114067034a435e1e929 --- /dev/null +++ b/old/weather_regimes_v13.R~ @@ -0,0 +1,607 @@ + +library(s2dverification) # for the function Load() +library(abind) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint.name <- "ERA-Interim" + +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- ERAint # choose one of the two above reanalysis from where to load the input 
psl data
+rean.name <- ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- forecast.name
+
+year.start <- 1994 #1979 #1981 #1994
+year.end <- 2013 #2010
+
+leadtime <- 1 # if fields=forecast, set the forecast time (in days) at which to compute the weather regimes
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- ECMWF_monthly.name # ERAint.name
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure (for drawing color scales)
+
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days of the same regime
+
+PCA <- FALSE # set to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
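+
+# Note on 'leadtime' (names as in this script): with 6-hourly forecast data,
+# forecast day d corresponds to the four 6-hourly steps (d*4-3):(d*4), which the
+# Load() calls further below average into a daily mean; e.g. leadtime = 1 -> steps 1:4,
+# leadtime = 2 -> steps 5:8.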
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(abs(domain$lat-lat.max) == min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min) == min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max) == min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min) == min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon of chosen area only +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + sdates.seq <- weekly.seq(forecast.year,mes,day) + psleuFull <-array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates.seq)){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + psleuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year 
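+
+  # in the reanalysis branch below, days.period collects, for each year of the
+  # climatology, the positions of the chosen season's days inside the full daily
+  # series (via the helper pos.period()), so the clustering uses that season only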
+ if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { + my.cluster[[period]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + + + + + + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + sdates.seq <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates.seq)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), 
paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } +} + + + + + + + + + + + + + + + +# for loading forecasting systems: + +### load once 1 file to retrieve the lat and lon values, then save them in an .RData file to retrieve them when needed: +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1) + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + if(sequences){ + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + rm(wrdiff, sequ) + } else{ + cluster.sequence <- my.cluster[[period]]$cluster + } + + gc() + + #wr1 <- which(my.cluster[[period]]$cluster==1) + #wr2 <- 
which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + period.length <- n.days.in.a.period(period,1999) # using year 1999 introduce a small error in winter season because of bisestile years + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + #wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + #wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + #wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + #wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- 
pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE +as.pdf <- FALSE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl. Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +# breaks and colors of the geopotential fields: + +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(-2000,seq(-1400,1400, 200), 2000) # % Mean anomaly of a WR + my.brks2 <- c(-2000,seq(-1400,1400,200), 2000) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-1000,-500,0,500,1000) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +my.brks.var <- c(-20,seq(-3,3,0.2),20) # % Mean anomaly of a WR + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + +if(as.pdf)(pdf(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl. Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl. 
Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl. Ridge" + } + if(period == 16){ + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster1.name="Atl. Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",rean.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",rean.name), cex.main=9, line=-7) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + PlotEquiMap(map1, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1), brks2=my.brks2, drawleg=F) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05)) + plot.new(); plot.new(); plot.new() # plot 2 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, contours=t(map4), brks2=my.brks2, drawleg=F) + plot.new(); PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05)) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime4.name, psl.name, "Anomaly"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4.5) + + # Legends: + + if(var.name[var.num] == "sfcWind") 
values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + if(var.name[var.num] == "tas") values.to.plot2 <- c(-10,-6,-3,0,3,6,10) + + my.subset <- match(values.to.plot, my.brks) + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(0.04, 0.302, 0.717, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5) + #ColorBar2(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.718,0.743), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + #ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.483,0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.484,0.509), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.248,0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.248,0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.04,0.3,0.013,0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset) + mtext(side=4," m", cex=2.5) + par(fig=c(0.355,0.605,0.013,0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5) + + par(fig=c(0.66,0.67,0.83,0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.60,0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.37,0.38), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(0.66,0.67,0.13,0.14), new=TRUE) + mtext("%", cex=3.3) + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v14.R b/old/weather_regimes_v14.R new file mode 100644 index 0000000000000000000000000000000000000000..8843ebcf14dc7ca72dcf4dba56bac9100c26718d --- /dev/null +++ b/old/weather_regimes_v14.R @@ -0,0 +1,671 @@ + +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint.name <- "ERA-Interim" + +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- ERAint # choose one of the two above reanalysis from where to 
load the input psl data
+rean.name <- ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- rean # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- rean.name
+
+year.start <- 1994 #1979 #1981 #1994
+year.end <- 2013 #2010
+
+leadtime <- 1 # if fields=forecast, set the forecast time (in days) at which to compute the weather regimes
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- ERAint #ECMWF_monthly # ERAint # choose the dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- ERAint.name #ECMWF_monthly.name # ERAint.name
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days of the same regime
+
+PCA <- FALSE # set to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
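+# Editor's note: the study box crosses the Greenwich meridian, and lon.min above is expressed in
+# the [0,360) longitude convention of the input files. A minimal sketch (not part of the original
+# script) of the conversion from the [-180,180] convention, wrapped in if(FALSE) so it never runs;
+# 'lon.deg' is an illustrative name only:
+if(FALSE){
+  lon.deg <- c(-85.5, -10, 0, 45) # longitudes in the [-180,180] convention
+  lon.pos <- lon.deg %% 360       # mapped to [0,360): -85.5 becomes 274.5, as in lon.min above
+  print(lon.pos)                  # 274.5 350.0 0.0 45.0
+}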
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+n.years <- year.end - year.start + 1
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: this only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded from the files)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area (like lat.min/lat.max above, but snapped to actual values of the lat/lon vectors)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly psl data in the forecast case:
+  sdates.seq <- weekly.seq(forecast.year,mes,day)
+  psleuFull <- array(NA,c(length(sdates.seq)*n.years, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
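+  # Editor's note: a minimal sketch (not part of the original script) of the row arithmetic used
+  # in the loop below; rows are grouped by startdate and the n.years hindcast years of each
+  # startdate are stored consecutively, so startdate 2 occupies rows 21:40 when n.years = 20.
+  # Wrapped in if(FALSE) so it never runs; 'n.years.demo' and 'rows.of.startdate' are
+  # illustrative names only:
+  if(FALSE){
+    n.years.demo <- 20 # e.g. the 1994-2013 hindcast period
+    rows.of.startdate <- function(s) (1 + (s-1)*n.years.demo):(s*n.years.demo)
+    print(rows.of.startdate(1)) # rows 1:20  -> 19940102 19950102 ... 20130102
+    print(rows.of.startdate(2)) # rows 21:40 -> 19940109 19950109 ... 20130109
+  }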
+
+  for (startdate in 1:length(sdates.seq)){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    psleuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period
+
+    days.period <- c() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+  }
+
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter)
+
+  # weight the pressure fields by latitude: multiplying each point by sqrt(cos(lat)) makes the squared
+  # Euclidean distances used by k-means proportional to cos(lat), i.e. to the grid-cell area:
+  lat.weighted <- cos(lat*pi/180)^0.5
+  lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+  pslPeriod.weighted <- pslPeriod * lat.weighted.array
+  rm(lat.weighted.array)
+  gc()
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep, based on their cumulative explained variance
+    n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # keep only the first PCs that together explain at least the chosen fraction of variance
+    my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # remove from pslmat the rows with NA:
+    my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly var data in the forecast case:
+  sdates.seq <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates.seq)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+    period.length <- n.days.in.a.period(period,1999) + ifelse(period==13 | period==2, floor(n.years/4), 0) # using year 1999 introduces a small error in the winter season length because of leap years; it can be roughly corrected by adding floor(n.years/4) days in winter and in February
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- c()
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period)
+    for(i in 5:(n.days.period-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers, where days that don't belong to sequences of 5 or more days are set to 0:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else {
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  #wr1 <- which(my.cluster[[period]]$cluster==1)
+  #wr2 <- which(my.cluster[[period]]$cluster==2)
+  #wr3 <- which(my.cluster[[period]]$cluster==3)
+  #wr4 <- which(my.cluster[[period]]$cluster==4)
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+    } else {
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert counts to relative frequencies (they are multiplied by 100 later, when plotting):
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  # significance of each regime impact: for each grid point, t-test between all the days of the
+  # period and the days assigned to the regime:
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth1, varPeriodAnom1)
+  gc()
+
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth2, varPeriodAnom2)
+  gc()
+
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth3, varPeriodAnom3)
+  gc()
+
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+  rm(varPeriodAnom)
+  gc()
+
+  pslPeriod <- psleuFull[days.period,,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # save all the data necessary to redraw the graphs once we know the right regime of each cluster:
+  save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close the for loop on 'period'
+
+
+# run what follows twice: first with ordering <- FALSE, to look at the cartography and identify visually the right regime names of the four clusters;
+# then a second time with ordering <- TRUE, to save the ordered cartography:
+ordering <- TRUE
+as.pdf <- TRUE # TRUE: save all seasons in a single pdf; FALSE: save one .png file per season
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4] + +# breaks and colors of the geopotential fields: + +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-20,20,2)) # % Mean anomaly of a WR + my.brks2 <- c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-20,-16,-12,-8,-4,0,4,8,12,16,20) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[length(my.cols)/2] <- my.cols[length(my.cols)/2 +1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +my.cols.var[length(my.cols.var)/2] <- my.cols.var[length(my.cols.var)/2 +1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 15){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 16){ + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) 
png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source:",fields.name), cex.main=9, line=-7) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + plot.new() + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); plot.new() #PlotEquiMap2(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + plot.new(); plot.new(); plot.new(); plot.new() + plot.new(); plot.new() + + plot.new() + plot.new(); plot.new() + plot.new(); plot.new(); plot.new() #barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); plot.new() + + plot.new() + plot.new(); plot.new() + plot.new(); plot.new(); plot.new() #barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) + + plot.new() + plot.new(); plot.new() + plot.new(); plot.new(); plot.new() #barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) # plot 2 empty graphs + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime4.name, psl.name, "Anomaly"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4.5) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.44 + par(fig=c(map.xpos, map.xpos + map.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + par(fig=c(map.xpos, map.xpos + map.width, 
0.51, 0.69), new=TRUE) + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + + # Title Centroid Maps: + #plot.new() + map.title.xpos <- 0.2 + map.title.width <- 0.3 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.745, 0.925), new=TRUE) + mtext("Year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.51, 0.69), new=TRUE) + mtext("Year", cex=3) + + # Impact maps: + impact.xpos <- 0.45 + impact.width <- 0.28 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.925), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.51,0.69), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.455), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.04,0.22), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + + # Legends: + + if(var.name[var.num] == "sfcWind") values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + if(var.name[var.num] == "tas") values.to.plot2 <- c(-10,-6,-3,0,3,6,10) + + my.subset <- match(values.to.plot, my.brks) + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(0.04, 0.302, 0.717, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5) + #ColorBar2(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)} + par(fig=c(0.355,0.605,0.718,0.743), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, 
vert=FALSE, cex=2.5, subset=my.subset2)
+  #ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.483,0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.484,0.509), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.84, 0.85), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60,0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.37,0.38), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13,0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  if(!as.pdf) dev.off() # for saving the 4 PNG files
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  ## if(period == 13){ # Winter
+  ## cluster2.name="NAO+"
+  ## cluster3.name="NAO-"
+  ## cluster1.name="Blocking"
+  ## cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 14){
+  ## cluster1.name="NAO+"
+  ## cluster4.name="NAO-"
+  ## cluster3.name="Blocking"
+  ## cluster2.name="Atl.Ridge"
+  ## }
+  ## if(period == 15){
+  ## cluster1.name="NAO+"
+  ## cluster2.name="NAO-"
+  ## cluster3.name="Blocking"
+  ## cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 16){
+  ## cluster1.name="NAO+"
+  ## cluster3.name="NAO-"
+  ## cluster2.name="Blocking"
+  ## cluster4.name="Atl.Ridge"
+  ## }
diff --git a/old/weather_regimes_v14.R~ b/old/weather_regimes_v14.R~
new file mode 100644
index 0000000000000000000000000000000000000000..29e163adbd2815300c6b0490305ed24e078abe80
--- /dev/null
+++ b/old/weather_regimes_v14.R~
@@ -0,0 +1,599 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55" + +rean <- ERAint # choose one of the two above reanalysis from where to load the input psl data +rean.name <- ERAint.name + +# available forecast systems for the var data: +ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_monthly +forecast.name <- ECMWF_monthly.name + +fields <- rean # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems +fields.name <- rean.name + +year.start <- 1994 #1979 #1981 #1994 +year.end <- 2013 #2010 + +leadtime <- 1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +var.data <- ERAint #ECMWF_monthly # ERAint # chose a dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- ERAint.name #ECMWF_monthly.name # ERAint.name + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +PCA <- FALSE # se to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitud! 
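+# Editor's note: a quick units check for the two pressure variables selectable above (a sketch,
+# not part of the original script, wrapped in if(FALSE) so it never runs; the *.sample values are
+# illustrative only). Further below, g500 (geopotential, m2/s2) is divided by g = 9.81 m/s2 to
+# obtain the geopotential height in m, and psl (Pa) is divided by 100 to obtain hPa:
+if(FALSE){
+  g500.sample <- 53000    # a typical 500-hPa geopotential value [m2/s2]
+  print(g500.sample/9.81) # ~5402 m of geopotential height
+  psl.sample <- 101325    # standard mean sea-level pressure [Pa]
+  print(psl.sample/100)   # 1013.25 hPa
+}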
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") +n.years <- year.end - year.start + 1 + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + sdates.seq <- weekly.seq(forecast.year,mes,day) + psleuFull <-array(NA,c(length(sdates.seq)*n.years, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+ + for (startdate in 1:length(sdates.seq)){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + psleuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- c() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + if(unlist(fields) == unlist(forecast) | period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # remove from matpsl the years with NA: + my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly var data in the forecast case: + sdates.seq <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates.seq)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + period.length <- n.days.in.a.period(period,1999) + ifelse(period==13 | period==2, floor(n.years/4), 0) # using year 1999 introduce a small error in winter season length because of bisestile years that can be roughly corrected adding floor(n.years/4) in winter and in february + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period, 
(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length <- length(my.startdates.period) # number of Thusdays in the period + } + + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + if(sequences){ + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + rm(wrdiff, sequ) + } else{ + cluster.sequence <- my.cluster[[period]]$cluster + } + + gc() + + #wr1 <- which(my.cluster[[period]]$cluster==1) + #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } else { + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 
4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE +as.pdf <- TRUE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl. 
Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +# breaks and colors of the geopotential fields: + +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(-20,seq(-14,14, 2), 20) # % Mean anomaly of a WR + my.brks2 <- c(-20,seq(-14,14,2), 20) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-12,-8,-4,0,4,8,12) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl. Ridge" + } + if(period == 14){ + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster2.name="Atl. Ridge" + } + if(period == 15){ + cluster1.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl. Ridge" + } + if(period == 16){ + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl. 
Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",fields.name), cex.main=9, line=-7) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); PlotEquiMap2(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + plot.new(); plot.new(); plot.new() # plot 2 empty graphs + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + plot.new(); PlotEquiMap2(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + plot.new(); plot.new(); barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + plot.new(); PlotEquiMap2(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + plot.new(); plot.new(); barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-2) + + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + plot.new(); PlotEquiMap2(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + plot.new(); plot.new(); barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime4.name, psl.name, "Anomaly"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); 
title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4.5)
+
+  # Legends:
+
+  if(var.name[var.num] == "sfcWind") values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+  if(var.name[var.num] == "tas") values.to.plot2 <- c(-10,-6,-3,0,3,6,10)
+
+  my.subset <- match(values.to.plot, my.brks)
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(0.04, 0.302, 0.717, 0.744), new=TRUE)  # syntax fig=c(xmin, xmax, ymin, ymax)
+  #ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5)
+  #ColorBar2(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.718,0.743), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  #ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.483,0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.484,0.509), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+
+  par(fig=c(0.355,0.605,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.66,0.67,0.83,0.84), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.60,0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.37,0.38), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(0.66,0.67,0.13,0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  if(!as.pdf) dev.off()  # for saving the 4 png files
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off()  # for saving a single pdf with all seasons
+
+
diff --git a/old/weather_regimes_v15.R b/old/weather_regimes_v15.R
new file mode 100644
index 0000000000000000000000000000000000000000..286ea6869aab4a09c44cb3e0948c68e4bda3ae43
--- /dev/null
+++ b/old/weather_regimes_v15.R
@@ -0,0 +1,700 @@
+
+library(s2dverification)  # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')  # for the calendar functions
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes"  # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint  # choose one of the two reanalyses above from which to load the input psl data
+rean.name <- ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- forecast  # put 'rean' to load the pressure fields and var from a reanalysis, put 'forecast' to load them from a forecast system
+fields.name <- forecast.name
+
+year.start <- 1994  #1979 #1981
+year.end <- 2013  #2010
+
+leadtime <- 1  # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes
+forecast.year <- year.end+1  # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1  # if fields=forecast, set the month of the first startdate of the forecast year
+day=2  # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl"  #"g500"  # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP"  # "Z500"  # the name to show in the maps
+
+var.data <- ERAint  #ECMWF_monthly  # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- ERAint.name  #ECMWF_monthly.name
+
+sequences=FALSE  # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime
+
+PCA <- FALSE  # set to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8  # minimum fraction of variance that the retained PCs must explain (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81  #81 #80 #70
+lat.min <- 27  #30 #20 #30
+lon.max <- 45  #36 #30 #40
+lon.min <- 274.5  #274.5 #270 #280  # put a positive number here because the geopotential has only positive values of longitude!
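+
+# Note (illustration with a hypothetical grid, not part of the original workflow):
+# with longitudes stored in 0-360 form, a box going from lon.min = 274.5 to
+# lon.max = 45 wraps across the Greenwich meridian, so it corresponds to two index
+# blocks of the longitude vector: its head (0 to lon.max) and its tail (lon.min to 360):
+#   lon.toy <- seq(0, 359, by = 1)                          # invented 1-degree grid
+#   sel <- c(1:max(which(lon.toy <= 45)),                   # points from 0 to 45 degrees
+#            min(which(lon.toy >= 274.5)):length(lon.toy))  # points from 274.5 to 359 degrees
+#   range(lon.toy[sel])                                     # 0 and 359: both sides of Greenwich
+# This mirrors the pos.lon selection computed below.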
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+if(lat.min >= lat.max) stop("lat.min cannot be greater than or equal to lat.max")
+n.years <- year.end - year.start + 1
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1]  # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1)  # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon))  # beware: it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat]  # lat values of the chosen area only (it is smaller than the whole spatial domain loaded from the data)
+lon <- domain$lon[pos.lon]  # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min]  # min and max lat/lon of the chosen area (like lat.min/lat.max, but snapped to actual values of the lat/lon vectors)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){  # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else {  # Load 6-hourly psl data in the forecast case:
+  sdates.seq <- weekly.seq(forecast.year,mes,day)
+  psleuFull <- array(NA,c(length(sdates.seq)*n.years, n.pos.lat, n.pos.lon))  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
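+  # Illustration (toy values, not used by the script): with this interleaved ordering
+  # the days of a single year are NOT contiguous; they sit every n.years positions.
+  # E.g. with 3 years and 2 startdates:
+  #   years.toy <- 2000:2002; sdates.toy <- c("0102","0109")    # invented values
+  #   as.vector(outer(years.toy, sdates.toy, paste0))           # "20000102" "20010102" "20020102" "20000109" "20010109" "20020109"
+  #   seq(2, 6, by = 3)                                         # 2 5: the positions of year 2001
+  # This is why the yearly frequencies further below are extracted with
+  # seq((y-year.start+1), length(cluster.sequence), n.years).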
+
+  for (startdate in 1:length(sdates.seq)){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    psleuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81  # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100  # convert MSLP from Pa to hPa
+gc()
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1]  # remove the NA at the beginning, introduced only to be able to execute the command above
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)  # get which startdates belong to the chosen period
+
+    days.period <- c()  # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+  }
+
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,]  # select only the days inside the chosen period (e.g. winter)
+
+  # weight the pressure fields by latitude:
+  lat.weighted <- cos(lat*pi/180)^0.5
+  lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+  pslPeriod.weighted <- pslPeriod * lat.weighted.array
+  rm(lat.weighted.array)
+  gc()
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon)  # convert the array into a matrix
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){  # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9)  # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
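+    # Illustration (sketch with invented numbers, not used by the script): princomp()
+    # returns the standard deviation of each component in $sdev, so the cumulative
+    # fraction of explained variance is cumsum(sdev^2)/sum(sdev^2). For example:
+    #   sdev.toy <- c(4, 2, 1, 1)                            # hypothetical sdev values
+    #   cumsum(sdev.toy^2) / sum(sdev.toy^2)                 # 0.727 0.909 0.955 1.000
+    #   which(cumsum(sdev.toy^2)/sum(sdev.toy^2) > 0.8)[1]   # 2 PCs already exceed 80%
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50)  # check how many PCs to keep, based on the sum of their expl. variance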
+    n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1)  # keep only the first PCs that together explain at least the chosen fraction of the variance
+    my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30)  # 4 is the number of clusters
+    rm(pslcut)
+  } else {  # remove from pslmat the days (rows) that are NA:
+    my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30)  # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+var.num <- 2  # Choose a variable. 1: sfcWind 2: tas
+var.name <- c("sfcWind","tas")  # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature")  # full name of the 'predictand' variable, used in the titles of the graphs
+var.unit <- c("m/s", "ºC")  # unit of measure of var (for drawing the color scales)
+
+# Load var data (var.num and var.name must already be defined, because the Load() calls below use them):
+if(unlist(fields) == unlist(rean)){  # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else {  # Load 6-hourly var data in the forecast case:
+  sdates.seq <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates.seq)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1]  # remove the NA at the beginning, introduced only to be able to execute the command above
+    period.length <- n.days.in.a.period(period,1999) + ifelse(period==13 | period==2, floor(n.years/4), 0)  # using year 1999 introduces a small error in the winter season length because of leap years; it can be roughly corrected by adding floor(n.years/4) in winter and in February
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    if(period == 13) my.startdates.period <- my.startdates.period[-2]  # remove the second startdate because it is broken
+
+    days.period <- c()
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length <- length(my.startdates.period)  # number of Thursdays in the period
+  }
+
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,]  # select only the var data inside the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period)
+    for(i in 5:(n.days.period-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    }  # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else {
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
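+  # Illustration (sketch, not used by the script): the 5-day filter above can be
+  # cross-checked with run-length encoding. rle() compresses consecutive repeats,
+  # so days sitting in runs shorter than 5 can be flagged directly:
+  #   cl.toy <- c(1,1,1,1,1,2,2,3,3,3,3,3,3)              # invented cluster series
+  #   r.toy  <- rle(cl.toy)
+  #   keep   <- rep(r.toy$lengths >= 5, r.toy$lengths)    # TRUE for days in runs of 5+ days
+  #   cl.toy * keep                                       # 1 1 1 1 1 0 0 3 3 3 3 3 3
+
+  #wr1 <- 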
which(my.cluster[[period]]$cluster==1) + #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } else { + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 
<- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- 
colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster1.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, 
cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE)
+  barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE)
+  barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE)
+  barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+
+  # % on Frequency plots:
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  # Title 1:
+  title1.xpos <- 0.02
+  title1.width <- 0.4
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.925, 0.930), new=TRUE)
+  mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.690, 0.696), new=TRUE)
+  mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.455, 0.460), new=TRUE)
+  mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.220, 0.225), new=TRUE)
+  mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+  # Title 2:
+  title2.xpos <- 0.42
+  title2.width <- 0.35
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.925, 0.930), new=TRUE)
+  mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.690, 0.696), new=TRUE)
+  mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.455, 0.460), new=TRUE)
+  mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.220, 0.225), new=TRUE)
+  mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+  # Title 3:
+  title3.xpos <- 0.75
+  title3.width <- 0.25
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.925, 0.930), new=TRUE)
+  mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.690, 0.696), new=TRUE)
+  mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.455, 0.460), new=TRUE)
+  mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.220, 0.225), new=TRUE)
+  mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+  # Legend 1:
+  legend1.xpos <- 0.01
+  legend1.width <- 0.44
+  legend1.cex <- 1.8
+  my.subset <- match(values.to.plot, my.brks)
+
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE)  # syntax fig=c(xmin, xmax, ymin, ymax)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=legend1.cex)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=legend1.cex)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=legend1.cex)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=legend1.cex)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off()  # for saving the 4 png files
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off()  # for saving a single pdf with all seasons
+
+
+  ## if(period == 13){ # Winter
+  ## cluster2.name="NAO+"
+  ## cluster3.name="NAO-"
+  ## cluster1.name="Blocking"
+  ## cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 14){
+  ## cluster1.name="NAO+"
+  ## cluster4.name="NAO-"
+  ## cluster3.name="Blocking"
+  ## cluster2.name="Atl.Ridge"
+  ## }
+  ## if(period == 15){
+  ## cluster1.name="NAO+"
+  ## cluster2.name="NAO-"
+  ## cluster3.name="Blocking"
+  ## cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 16){
+  ## cluster1.name="NAO+"
+  ## cluster3.name="NAO-"
+  ## cluster2.name="Blocking"
+  ## cluster4.name="Atl.Ridge"
+  ## }
+
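+
+## Illustration (sketch, not part of the original script): the cluster-to-regime
+## ordering above is just a permutation. Once the regime name of each cluster has
+## been identified by eye, which(orden == cluster.name) gives the row of the final
+## figure where that cluster must be drawn. With invented names:
+##   orden.toy <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+##   names.toy <- c("Blocking","NAO+","Atl.Ridge","NAO-")   # clusters 1-4 as identified visually
+##   match(names.toy, orden.toy)                            # 3 1 4 2: cluster 1 goes to row 3, etc.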
'/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- ERAint # choose one of the two above reanalysis from where to load the input psl data +rean.name <- ERAint.name + +# available forecast systems for the var data: +ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_monthly +forecast.name <- ECMWF_monthly.name + +fields <- rean # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems +fields.name <- rean.name + +year.start <- 1994 #1979 #1981 #1994 +year.end <- 2013 #2010 + +leadtime <- 1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +var.data <- ERAint #ECMWF_monthly # ERAint # chose a dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- ERAint.name #ECMWF_monthly.name # ERAint.name + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +PCA <- FALSE # se to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitud! 
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") +n.years <- year.end - year.start + 1 + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + sdates.seq <- weekly.seq(forecast.year,mes,day) + psleuFull <-array(NA,c(length(sdates.seq)*n.years, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+ + for (startdate in 1:length(sdates.seq)){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + psleuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- c() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # remove from matpsl the years with NA: + my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly var data in the forecast case: + sdates.seq <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates.seq)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates.seq)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates.seq[startdate],5,6), substr(sdates.seq[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + period.length <- n.days.in.a.period(period,1999) + ifelse(period==13 | period==2, floor(n.years/4), 0) # using year 1999 introduce a small error in winter season length because of bisestile years that can be roughly corrected adding floor(n.years/4) in winter and in february + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period, 
+     }
+     period.length <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  if(sequences){
+     # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+     # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+     wrdiff <- diff(my.cluster[[period]]$cluster)
+
+     sequ <- rep(FALSE, n.days.period)
+     for(i in 5:(n.days.period-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+     } # close for loop on i
+
+     # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+     cluster.sequence <- my.cluster[[period]]$cluster * sequ
+     rm(wrdiff, sequ)
+  } else{
+     cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  #wr1 <- which(my.cluster[[period]]$cluster==1)
+  #wr2 <- which(my.cluster[[period]]$cluster==2)
+  #wr3 <- which(my.cluster[[period]]$cluster==3)
+  #wr4 <- which(my.cluster[[period]]$cluster==4)
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+       wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+       wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+       wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+       wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+    } else {
+       wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+       wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+       wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+       wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] ==
4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE +as.pdf <- TRUE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] 
+regime4.name <- orden[4] + +# breaks and colors of the geopotential fields: + +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-20,20,2)) # % Mean anomaly of a WR + my.brks2 <- c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-20,-16,-12,-8,-4,0,4,8,12,16,20) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[length(my.cols)/2] <- my.cols[length(my.cols)/2 +1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +my.cols.var[length(my.cols.var)/2] <- my.cols.var[length(my.cols.var)/2 +1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 15){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 16){ + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) 
png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,1,1,1,1,31,32,6,33,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,34,35,6,36, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,37,38,6,39, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,40,41,6,42, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30,43,43,6,44),47,4,byrow=TRUE), + widths=c(2,2,0.2,2), heights=c(rep(0.2,2),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,10),0.3,rep(0.2,12))) + #layout.show(44) + + plot.new(); title(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source:",fields.name), cex.main=9, line=-7) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + plot.new() + #par(mar=c(10.5,14.5,10.5,14.5)) + plot.new(); plot.new() #PlotEquiMap2(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + plot.new(); plot.new(); plot.new(); plot.new() + plot.new(); plot.new() + + plot.new() + plot.new(); plot.new() + plot.new(); plot.new(); plot.new() #barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); plot.new() + + plot.new() + plot.new(); plot.new() + plot.new(); plot.new(); plot.new() #barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + plot.new(); plot.new() + + plot.new() + plot.new(); plot.new() + plot.new(); plot.new(); plot.new() #barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) # plot 2 empty graphs + plot.new(); title("Year", cex.main=2, line=-3) + + # Titles: + plot.new(); title(paste(regime1.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime1.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime2.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, psl.name, "Anomaly"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime3.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=3.3, line=-4.5) + plot.new(); title(paste(regime4.name, psl.name, "Anomaly"), cex.main=3.3, line=-5) + plot.new(); title(paste(regime4.name, "impact on", var.name.full[var.num]), cex.main=3.3, line=-4.5) + plot.new(); title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=3.3, line=-4.5) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.44 + par(fig=c(map.xpos, map.xpos + map.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + 
PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intylat=10, drawleg=F, cex.lab=1.5) + + # Title Centroid Maps: + #plot.new() + map.title.xpos <- 0.8 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.745, 0.925), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.51, 0.69), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.275, 0.455), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.45 + impact.width <- 0.28 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.925), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.51,0.69), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.455), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.04,0.22), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60) + + # Legends: + + if(var.name[var.num] == "sfcWind") values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + if(var.name[var.num] == "tas") values.to.plot2 <- c(-10,-6,-3,0,3,6,10) + + my.subset <- match(values.to.plot, my.brks) + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(0.04, 0.302, 0.717, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5) + #ColorBar2(my.brks, cols=my.cols, vert=FALSE, subsampleg=10, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)} + 
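+  # a worked example of the 'subset' indices above (illustrative values only, using the g500 breaks):
+  # with my.brks <- c(-300,seq(-200,200,10),300) and values.to.plot <- c(-200,-100,0,100,200),
+  # match(values.to.plot, my.brks) returns c(2,12,22,32,42), i.e. the positions inside my.brks where
+  # ColorBar3() draws its tick labels; a value absent from my.brks would give NA, so values.to.plot
+  # must be an exact subset of the breaks.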
+  par(fig=c(0.355,0.605,0.718,0.743), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  #ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=2.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=my.labels2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.483,0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.484,0.509), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.248,0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  par(fig=c(0.04,0.3,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=2.5, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=2.5)}
+  par(fig=c(0.355,0.605,0.013,0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=2.5, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=2.5)
+
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.84, 0.85), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60,0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.37,0.38), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13,0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  if(!as.pdf) dev.off() # for saving one png per season
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+  ## if(period == 13){ # Winter
+  ##   cluster2.name="NAO+"
+  ##   cluster3.name="NAO-"
+  ##   cluster1.name="Blocking"
+  ##   cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 14){
+  ##   cluster1.name="NAO+"
+  ##   cluster4.name="NAO-"
+  ##   cluster3.name="Blocking"
+  ##   cluster2.name="Atl.Ridge"
+  ## }
+  ## if(period == 15){
+  ##   cluster1.name="NAO+"
+  ##   cluster2.name="NAO-"
+  ##   cluster3.name="Blocking"
+  ##   cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 16){
+  ##   cluster1.name="NAO+"
+  ##   cluster3.name="NAO-"
+  ##   cluster2.name="Blocking"
+  ##   cluster4.name="Atl.Ridge"
+  ## }
diff --git a/old/weather_regimes_v16.R b/old/weather_regimes_v16.R
new file mode 100644
index 0000000000000000000000000000000000000000..6e7d2c73b9b00546b05c91c2bdf7cc57a7bf8e25
--- /dev/null
+++ b/old/weather_regimes_v16.R
@@ -0,0 +1,712 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path =
'/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+rean.name <- JRA55.name #ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- rean #forecast # put 'rean' to load pressure fields and var from a reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- rean.name #forecast.name
+
+year.start <- 1979 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                     # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name
+
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime.
+
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
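+# a minimal worked example of the longitude convention above (illustrative values only, not used by the script):
+# the input grids store longitude as 0-360, so a western longitude of -85.5 degrees in the -180..180
+# convention becomes -85.5 + 360 = 274.5, which is the lon.min set above.
+#lon.west <- -85.5 # hypothetical longitude in the -180..180 convention
+#lon.west.360 <- ifelse(lon.west < 0, lon.west + 360, lon.west) # 274.5, i.e. the value of lon.min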
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+n.years <- year.end - year.start + 1
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: this only handles the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (similar to lat.min/lat.max, but corresponding to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+   psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+   for (y in year.start:year.end){
+     var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                 latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+     psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+     rm(var)
+     gc()
+   }
+
+} else { # Load 6-hourly psl data in the forecast case:
+   n.leadtimes <- length(leadtime) # these must be defined before dimensioning psleuFull below
+   sdates <- weekly.seq(forecast.year,mes,day)
+   n.sdates <- length(sdates)
+
+   psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
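+   # worked example of this row ordering (assuming year.start=1994 and year.end=2013, i.e. n.years=20,
+   # as in the comment above): the row of startdate s and year y is (s-1)*n.years + (y-year.start+1),
+   # so startdate 2 of year 1995 is stored in row (2-1)*20 + 2 = 22; conversely, all rows of a given
+   # year y are seq(y-year.start+1, n.sdates*n.years, n.years), the same indexing used further below
+   # to compute the yearly regime frequencies in the forecast case.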
+   for (startdate in 1:n.sdates){
+     for(lday in leadtime){
+        var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+        mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+        rm(var)
+        gc()
+
+        psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+        rm(mean.lday)
+        gc()
+     }
+   }
+
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pascal to hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+     days.period <- NA
+     for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+     days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+
+  } else {
+     my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period
+
+     days.period <- c() # get which days inside psleuFull belong to the chosen period
+     for(startdate in my.startdates.period){
+        days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years))
+     }
+  }
+
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e. winter)
+
+  # weight the pressure fields based on latitude:
+  lat.weighted <- cos(lat*pi/180)^0.5
+  lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+  pslPeriod.weighted <- pslPeriod * lat.weighted.array
+  rm(lat.weighted.array)
+  gc()
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){ # compute the PCs:
+     my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+     pslcut <- pslmat[,my.seq]
+
+     my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+     tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance
+     n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # keep only the PCs that together explain at least the chosen fraction of variance (e.g. 80%)
+     my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+     rm(pslcut)
+  } else { # remove from pslmat the years with NA:
+     my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+   vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+   for (y in year.start:year.end){
+     var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                 latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+     vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+     rm(var)
+     gc()
+   }
+
+} else { # Load 6-hourly var data in the forecast case:
+   sdates <- weekly.seq(forecast.year,mes,day)
+   vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+   for (startdate in 1:length(sdates)){
+     var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+     temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+     vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+     rm(temp,var)
+     gc()
+   }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+     days.period <- NA
+     for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+     days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+     period.length <- n.days.in.a.period(period,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+
+  } else {
+     my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+     if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+     days.period <- c()
+     for(startdate in my.startdates.period){
+        days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+     }
+     period.length <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  if(sequences){
+     # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+     # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+     wrdiff <- diff(my.cluster[[period]]$cluster)
+
+     sequ <- rep(FALSE, n.days.period)
+     for(i in 5:(n.days.period-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+     } # close for loop on i
+
+     # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+     cluster.sequence <- my.cluster[[period]]$cluster * sequ
+     rm(wrdiff, sequ)
+  } else{
+     cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  #wr1 <- which(my.cluster[[period]]$cluster==1)
+  #wr2 <- which(my.cluster[[period]]$cluster==2)
+  #wr3 <- which(my.cluster[[period]]$cluster==3)
+  #wr4 <- which(my.cluster[[period]]$cluster==4)
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+       wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+       wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+       wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+       wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+    } else {
+       wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+       wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+       wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+       wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence),
n.years)] == 4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
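+# a worked example of the cluster-to-regime reordering used below (illustrative names only): if visual
+# inspection of a season gives cluster1.name="Blocking", then cluster1 <- which(orden == "Blocking") is 3,
+# so the centroid of cluster 1 is stored with assign("map3", pslwr1mean) and is drawn in the third row
+# of the composition, under the "Blocking" titles.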
+save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- TRUE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster1.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, 
file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, 
cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.925, 0.930), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.690, 0.696), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.455, 0.460), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.220, 0.225), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.925, 0.930), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.690, 0.696), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.455, 0.460), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.220, 0.225), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.925, 0.930), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.690, 0.696), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.455, 0.460), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.220, 0.225), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", 
cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off() # for saving one png per season
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+  ## if(period == 13){ # Winter
+  ##   cluster2.name="NAO+"
+  ##   cluster3.name="NAO-"
+  ##   cluster1.name="Blocking"
+  ##   cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 14){
+  ##   cluster1.name="NAO+"
+  ##   cluster4.name="NAO-"
+  ##   cluster3.name="Blocking"
+  ##   cluster2.name="Atl.Ridge"
+  ## }
+  ## if(period == 15){
+  ##   cluster1.name="NAO+"
+  ##   cluster2.name="NAO-"
+  ##   cluster3.name="Blocking"
+  ##   cluster4.name="Atl.Ridge"
+  ## }
+  ## if(period == 16){
+  ##   cluster1.name="NAO+"
+  ##   cluster3.name="NAO-"
+  ##   cluster2.name="Blocking"
+  ##   cluster4.name="Atl.Ridge"
+  ## }
+
diff --git a/old/weather_regimes_v16.R~ b/old/weather_regimes_v16.R~
new file mode 100644
index 0000000000000000000000000000000000000000..8c2f7454afc9416a57aa04e81ce04fc2d0501cf2
--- /dev/null
+++ b/old/weather_regimes_v16.R~
@@ -0,0 +1,712 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path =
'/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- ERAint # choose one of the two above reanalysis from where to load the input psl data +rean.name <- ERAint.name + +# available forecast systems for the var data: +ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_monthly +forecast.name <- ECMWF_monthly.name + +fields <- forecast # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems +fields.name <- forecast.name + +year.start <- 1994 #1979 #1981 +year.end <- 2013 #2010 + +leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +var.data <- ERAint #ECMWF_monthly # ERAint # chose a dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- ERAint.name #ECMWF_monthly.name # ERAint.name + +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +PCA <- FALSE # se to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitud! 
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") +n.years <- year.end - year.start + 1 + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + n.leadtimes <- length(leadtime) # these three variables must be defined before allocating psleuFull, which needs them + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
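# Editor's sketch (not in the original script): the first dimension of psleuFull
# groups all years of the same startdate together, so the pair (startdate, y) is
# stored at row (startdate - 1) * n.years + (y - year.start + 1); this is exactly
# the block (1+(startdate-1)*n.years):(startdate*n.years) filled in the loop below.
# E.g. with year.start = 1994 and n.years = 20, startdate 2 of year 1994 lands on row 21.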
+ for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } + +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() # number of PCs retained for each period + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- c() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years)) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl.
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # remove from pslmat the years with NA: + my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas (must be set before the var data load below) + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly var data in the forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + period.length <- n.days.in.a.period(period,1999) + ifelse(period==13 | period==2, floor(n.years/4), 0) # using year 1999 introduces a small error in the winter season length because of leap years; it can be roughly corrected by adding floor(n.years/4) in winter and in February + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length <- length(my.startdates.period) # number of Thursdays in the period + } + + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + if(sequences){ + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + rm(wrdiff, sequ) + } else{ + cluster.sequence <- my.cluster[[period]]$cluster + } + + gc() + + #wr1 <-
which(my.cluster[[period]]$cluster==1) + #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } else { + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 
<- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # choose if you prefer to save results as one file .png for each season, or only 1 pdf with all seasons + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- 
colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in 13:16){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster1.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + #EU <- c(1:41, 130:175) #c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies + EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, 
cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.925, 0.930), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.690, 0.696), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.455, 0.460), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.220, 0.225), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.925, 0.930), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.690, 0.696), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.455, 0.460), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.220, 0.225), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.925, 0.930), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.690, 0.696), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.455, 0.460), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.220, 0.225), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m",
cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + if(!as.pdf) dev.off() # for saving the 4 png files + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + + + + + + + + + + + + ## if(period == 13){ # Winter + ## cluster2.name="NAO+" + ## cluster3.name="NAO-" + ## cluster1.name="Blocking" + ## cluster4.name="Atl.Ridge" + ## } + ## if(period == 14){ + ## cluster1.name="NAO+" + ## cluster4.name="NAO-" + ## cluster3.name="Blocking" + ## cluster2.name="Atl.Ridge" + ## } + ## if(period == 15){ + ## cluster1.name="NAO+" + ## cluster2.name="NAO-" + ## cluster3.name="Blocking" + ## cluster4.name="Atl.Ridge" + ## } + ## if(period == 16){ + ## cluster1.name="NAO+" + ## cluster3.name="NAO-" + ## cluster2.name="Blocking" + ## cluster4.name="Atl.Ridge" + ## } + diff --git a/old/weather_regimes_v17.R b/old/weather_regimes_v17.R new file mode 100644 index 0000000000000000000000000000000000000000..a31bd20162154227a4da0933eb60929a862c3687 --- /dev/null +++ b/old/weather_regimes_v17.R @@ -0,0 +1,680 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint.name <- "ERA-Interim" + +JRA55 <- list(path =
'/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data +rean.name <- ERAint.name #JRA55.name #ERAint.name + +# available forecast systems for the var data: +ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_monthly +forecast.name <- ECMWF_monthly.name + +fields <- rean #forecast # set it to 'rean' to load pressure fields and var from the reanalysis, or to 'forecast' to load them from forecast systems +fields.name <- rean.name #forecast.name + +year.start <- 1979 #1994 #1979 #1981 +year.end <- 2013 #2010 + +leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes. +startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on. +forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name + +lat.weighting=FALSE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime. + +PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
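# NOTE (editor's sketch, not part of the original script): rationale for the
# sqrt(cos(lat)) weighting enabled by lat.weighting above. Grid-cell area shrinks
# with latitude as cos(lat), and k-means clusters on squared Euclidean distances,
# so multiplying each field by w = cos(lat*pi/180)^0.5 makes the contribution of a
# grid point to the distance proportional to (w*x)^2 = cos(lat)*x^2, i.e. to the
# area that the point represents.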
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") +n.years <- year.end - year.start + 1 + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + n.leadtimes <- length(leadtime) # these three variables must be defined before allocating psleuFull, which needs them + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
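# Editor's sketch (not in the original script): the input is 6-hourly, so forecast
# day 'lday' corresponds to the four leadtimes (lday*4-3):(lday*4) selected by the
# Load() call below (e.g. lday = 3 -> leadtimes 9:12); apply() then averages these
# four 6-hourly values (and the ensemble members) into one daily mean before it is
# stored in psleuFull.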
+ for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } + +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() # number of PCs retained for each period + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- c() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years)) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } else { + pslPeriod.weighted <- pslPeriod + } + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl.
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # remove from pslmat the years with NA: + my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly var data in the forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + period.length <- n.days.in.a.period(period,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period,
(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length <- length(my.startdates.period) # number of Thursdays in the period + } + + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + if(sequences){ + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + rm(wrdiff, sequ) + } else{ + cluster.sequence <- my.cluster[[period]]$cluster + } + + gc() + + #wr1 <- which(my.cluster[[period]]$cluster==1) + #wr2 <- which(my.cluster[[period]]$cluster==2) + #wr3 <- which(my.cluster[[period]]$cluster==3) + #wr4 <- which(my.cluster[[period]]$cluster==4) + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } else { + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] ==
4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- FALSE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
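# Editor's sketch (not part of the original script; the pass-1 names below are
# hypothetical): the 'ordering' step further below is a permutation lookup. If the
# first pass of this block showed, say, cluster 1 = "Blocking", 2 = "NAO+",
# 3 = "Atl.Ridge" and 4 = "NAO-", then which(orden == cluster.name) returns the
# display rows 3, 1, 4, 2, so cluster 2 is drawn first (as NAO+), cluster 4 second
# (as NAO-), and so on; equivalently: match(c("Blocking","NAO+","Atl.Ridge","NAO-"), orden)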
+save.names <- TRUE # if TRUE, also save the cluster names (i.e., the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- TRUE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+
+# positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR
+if(var.name[var.num] == "sfcWind") {
+ my.brks.var <- seq(-3,3,0.5)
+ values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+}
+if(var.name[var.num] == "tas") {
+ my.brks.var <- seq(-5,5,1)
+ values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+}
+
+#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+if(as.pdf) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)
+
+for(period in 13:16){
+ load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+ if(save.names){
+ if(period == 13){ # Winter
+ cluster3.name="NAO+"
+ cluster4.name="NAO-"
+ cluster1.name="Blocking"
+ cluster2.name="Atl.Ridge"
+ }
+ if(period == 14){ # Spring
+ cluster1.name="NAO+"
+ cluster3.name="NAO-"
+ cluster2.name="Blocking"
+ cluster4.name="Atl.Ridge"
+ }
+ if(period == 15){ # Summer
+ cluster2.name="NAO+"
+ cluster3.name="NAO-"
+ cluster4.name="Blocking"
+ cluster1.name="Atl.Ridge"
+ }
+ if(period == 16){ # Autumn
+ cluster4.name="NAO+"
+ cluster1.name="NAO-"
+ cluster2.name="Blocking"
+ cluster3.name="Atl.Ridge"
+ }
+
+ cluster1.name.period[period] <- cluster1.name
+ cluster2.name.period[period] <- cluster2.name
+ cluster3.name.period[period] <- cluster3.name
+ cluster4.name.period[period] <- cluster4.name
+
+ save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+
+ } else { # in this case, we load the cluster names from the file already saved:
+ load(paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+ cluster1.name <- cluster1.name.period[period]
+ cluster2.name <- cluster2.name.period[period]
+ cluster3.name <- cluster3.name.period[period]
+ cluster4.name <- cluster4.name.period[period]
+ }
+
+ # assign to each cluster its regime:
+ if(ordering == FALSE){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ # Visualize the composition with the 3 graphs for all 4 regimes: their pressure fields, their impact on var and their frequency series:
+ if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2)
+
+ # Centroid maps:
+ map.xpos <- 0
+ map.width <- 0.46
+ par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Title Centroid Maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Impact maps:
+ impact.xpos <- 0.47
+ impact.width <- 0.26
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+ # Frequency plots:
+ barplot.xpos <- 0.75
+ barplot.width <- 0.25
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE)
+ barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE)
+ barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE)
+ barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE)
+ barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+
+ # % on Frequency plots:
+ symbol.xpos <- 0.735
+ symbol.width <- 0.01
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE)
+ mtext("%", cex=3.3)
+
+ # Title 1:
+ title1.xpos <- 0.02
+ title1.width <- 0.4
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE)
+ mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE)
+ mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE)
+ mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE)
+ mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+ # Title 2:
+ title2.xpos <- 0.42
+ title2.width <- 0.35
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE)
+ mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE)
+ mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE)
+ mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE)
+ mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+ # Title 3:
+ title3.xpos <- 0.75
+ title3.width <- 0.25
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE)
+ mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE)
+ mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE)
+ mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE)
+ mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+ # Legend 1:
+ legend1.xpos <- 0.01
+ legend1.width <- 0.44
+ legend1.cex <- 1.8
+ my.subset <- match(values.to.plot, my.brks)
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Legend 2:
+ legend2.xpos <- 0.48
+ legend2.width <- 0.25
+ legend2.cex <- 1.8
+ my.subset2 <- match(values.to.plot2, my.brks.var)
+
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+ if(!as.pdf) dev.off() # for saving 4 png
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v17.R~ b/old/weather_regimes_v17.R~
new file mode 100644
index 0000000000000000000000000000000000000000..568cb14e46463cb4f860e6cf1bf9f672c23730db
--- /dev/null
+++ b/old/weather_regimes_v17.R~
@@ -0,0 +1,711 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- JRA55 #ERAint # choose one of the two above reanalyses from which to load the input psl data
+rean.name <- JRA55.name #ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- rean #forecast # put 'rean' to load pressure fields and var from the reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- rean.name #forecast.name
+
+year.start <- 1979 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+ # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name
+
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime.
+
+PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+if(lat.min >= lat.max) stop("lat.min cannot be greater than or equal to lat.max")
+n.years <- year.end - year.start + 1
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the meridian of Greenwich!
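+# A minimal, self-contained illustration (toy 2.5-degree axis, not the real reanalysis grid)
+# of how the index vector above wraps the study box across the Greenwich meridian: the first
+# block (1:pos.lon.max) covers 0..lon.max and the second block (pos.lon.min:n.lon) covers
+# lon.min..360, so together they span the North Atlantic box.
+toy.lon <- seq(0, 357.5, by=2.5) # hypothetical longitude axis running 0..357.5
+toy.pos.lon.max <- tail(which(45 - toy.lon >= 0), 1) # last point <= 45 (lon.max)
+toy.pos.lon.min <- which(toy.lon - 274.5 >= 0)[1] # first point >= 274.5 (lon.min)
+toy.pos.lon <- c(1:toy.pos.lon.max, toy.pos.lon.min:length(toy.lon))
+range(toy.lon[toy.pos.lon]) # 0 and 357.5: the two blocks jointly cover 274.5..45 across Greenwich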
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+ psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+
+} else { # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates) # these three must be defined before allocating psleuFull below
+ psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+ if(unlist(fields) == unlist(rean)){
+ days.period <- NA
+ for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+ days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+
+ } else {
+ my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period
+
+ days.period <- c() # get which days inside psleuFull belong to the chosen period
+ for(startdate in my.startdates.period){
+ days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years))
+ }
+ }
+
+ n.days.period <- length(days.period)
+
+ pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e., winter)
+
+ # weight the pressure fields based on latitude:
+ lat.weighted <- cos(lat*pi/180)^0.5
+ lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+ pslPeriod.weighted <- pslPeriod * lat.weighted.array
+ rm(lat.weighted.array)
+ gc()
+
+ pslmat <- pslPeriod.weighted
+ dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
+ rm(pslPeriod, pslPeriod.weighted)
+ gc()
+
+ # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+ if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+ if(PCA){ # compute the PCs:
+ my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9
+ pslcut <- pslmat[,my.seq]
+
+ my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+ tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance
+ n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance
+ my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+ rm(pslcut)
+ } else { # remove from pslmat the rows (days) with NA:
+ my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+ }
+
+ rm(pslmat)
+ gc()
+}
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+ vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+
+} else { # Load 6-hourly var data in the forecast case:
+ sdates <- weekly.seq(forecast.year,mes,day)
+ vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+ for (startdate in 1:length(sdates)){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+ vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+ rm(temp,var)
+ gc()
+ }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
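+# A self-contained sketch (synthetic data, not the real psl fields) of the PCA + k-means
+# step used above when PCA=TRUE: keep the leading PCs whose cumulative explained variance
+# first exceeds the chosen threshold, then cluster the daily PC scores into 4 groups.
+toy.mat <- matrix(rnorm(300*20), nrow=300) # 300 "days" x 20 "grid points"
+toy.pca <- princomp(toy.mat, cor=FALSE)
+toy.totvar <- cumsum(toy.pca$sdev^2 / sum(toy.pca$sdev^2)) # cumulative explained variance
+toy.npcs <- which(toy.totvar > 0.8)[1] # smallest number of PCs explaining >80%
+toy.km <- kmeans(toy.pca$scores[, 1:toy.npcs], centers=4, iter.max=100, nstart=30)
+table(toy.km$cluster) # number of "days" assigned to each of the 4 toy clusters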
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+ if(unlist(fields) == unlist(rean)){
+ days.period <- NA
+ for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+ days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+ period.length <- n.days.in.a.period(period,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+
+ } else {
+ my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+ if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period <- c()
+ for(startdate in my.startdates.period){
+ days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length <- length(my.startdates.period) # number of Thursdays in the period
+ }
+
+ n.days.period <- length(days.period)
+
+ varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period
+
+ varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+ varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+ varPeriodAnom <- varPeriod - varPeriodClim2
+ rm(varPeriod, varPeriodClim, varPeriodClim2)
+ gc()
+
+ if(sequences){
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+ # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+ wrdiff <- diff(my.cluster[[period]]$cluster)
+
+ sequ <- rep(FALSE, n.days.period)
+ for(i in 5:(n.days.period-5)){
+ sequ[i] <- TRUE
+ if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ } # close for loop on i
+
+ # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+ cluster.sequence <- my.cluster[[period]]$cluster * sequ
+ rm(wrdiff, sequ)
+ } else{
+ cluster.sequence <- my.cluster[[period]]$cluster
+ }
+
+ gc()
+
+ #wr1 <- which(my.cluster[[period]]$cluster==1)
+ #wr2 <- which(my.cluster[[period]]$cluster==2)
+ #wr3 <- which(my.cluster[[period]]$cluster==3)
+ #wr4 <- which(my.cluster[[period]]$cluster==4)
+
+ # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ # yearly frequency of each regime:
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+
+ if(unlist(fields) == unlist(rean)){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+ } else {
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+ }
+
+ rm(cluster.sequence)
+ gc()
+
+ # convert to frequencies in %:
+ wr1y <- wr1y/period.length
+ wr2y <- wr2y/period.length
+ wr3y <- wr3y/period.length
+ wr4y <- wr4y/period.length
+
+ varPeriodAnom1 <- varPeriodAnom[wr1,,]
+ varPeriodAnom2 <- varPeriodAnom[wr2,,]
+ varPeriodAnom3 <- varPeriodAnom[wr3,,]
+ varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+ varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+ varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+ varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+ varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+ varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+ pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth1, varPeriodAnom1)
+ gc()
+
+ varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+ pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth2, varPeriodAnom2)
+ gc()
+
+ varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+ pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth3, varPeriodAnom3)
+ gc()
+
+ varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+ pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+ rm(varPeriodAnom)
+ gc()
+
+ pslPeriod <- psleuFull[days.period,,]
+ pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+ pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+ rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+ gc()
+
+ pslwr1 <- pslPeriodAnom[wr1,,]
+ pslwr2 <- pslPeriodAnom[wr2,,]
+ pslwr3 <- pslPeriodAnom[wr3,,]
+ pslwr4 <- pslPeriodAnom[wr4,,]
+ rm(pslPeriodAnom)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+ # save all the data necessary to redraw the graphs when we know the right regime:
+ save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+ rm(pvalue1,pvalue2,pvalue3,pvalue4)
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+ gc()
+
+} # close the for loop on 'period'
+
+
+# run it twice: once with ordering <- FALSE, to look only at the cartography and visually find the right regime names of the four clusters;
+# and a second time, to save the ordered cartography:
+ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+save.names <- FALSE # if TRUE, also save the cluster names (i.e., the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- FALSE # choose if you prefer to save results as one .png file for each season, or only 1 .pdf with all seasons
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR
+if(var.name[var.num] == "sfcWind") {
+ my.brks.var <- seq(-3,3,0.5)
+ values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+}
+if(var.name[var.num] == "tas") {
+ my.brks.var <- seq(-5,5,1)
+ values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+}
+
+#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+if(as.pdf) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)
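+# A small sketch of the reordering step used in the loop below: which() finds the row of
+# 'orden' that matches each manually assigned cluster name, and assign() then builds the
+# variable (map1..map4, imp1..imp4, ...) for that row, so every regime is always drawn in
+# the same position of the sheet regardless of the arbitrary k-means cluster numbering.
+toy.orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+toy.cluster1.name <- "Blocking" # say k-means cluster 1 was identified as the Blocking regime
+toy.row <- which(toy.orden == toy.cluster1.name) # 3: cluster 1 is drawn in the third row
+assign(paste0("toy.map", toy.row), "centroid of cluster 1") # creates the variable toy.map3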
+
+for(period in 13:16){
+ load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+ if(save.names){
+ if(period == 13){ # Winter
+ cluster1.name="NAO+"
+ cluster4.name="NAO-"
+ cluster3.name="Blocking"
+ cluster2.name="Atl.Ridge"
+ }
+ if(period == 14){
+ cluster4.name="NAO+"
+ cluster1.name="NAO-"
+ cluster2.name="Blocking"
+ cluster3.name="Atl.Ridge"
+ }
+ if(period == 15){
+ cluster4.name="NAO+"
+ cluster1.name="NAO-"
+ cluster3.name="Blocking"
+ cluster2.name="Atl.Ridge"
+ }
+ if(period == 16){
+ cluster3.name="NAO+"
+ cluster1.name="NAO-"
+ cluster4.name="Blocking"
+ cluster2.name="Atl.Ridge"
+ }
+
+ cluster1.name.period[period] <- cluster1.name
+ cluster2.name.period[period] <- cluster2.name
+ cluster3.name.period[period] <- cluster3.name
+ cluster4.name.period[period] <- cluster4.name
+
+ save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+
+ } else { # in this case, we load the cluster names from the file already saved:
+ load(paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+ cluster1.name <- cluster1.name.period[period]
+ cluster2.name <- cluster2.name.period[period]
+ cluster3.name <- cluster3.name.period[period]
+ cluster4.name <- cluster4.name.period[period]
+ }
+
+ # assign to each cluster its regime:
+ if(ordering == FALSE){
+ cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+ } else {
+ cluster1 <- which(orden == cluster1.name)
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+ }
+
+ assign(paste0("map",cluster1), pslwr1mean)
+ assign(paste0("map",cluster2), pslwr2mean)
+ assign(paste0("map",cluster3), pslwr3mean)
+ assign(paste0("map",cluster4), pslwr4mean)
+
+ assign(paste0("imp",cluster1), varPeriodAnom1mean)
+ assign(paste0("imp",cluster2), varPeriodAnom2mean)
+ assign(paste0("imp",cluster3), varPeriodAnom3mean)
+ assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+ assign(paste0("sig",cluster1), pvalue1)
+ assign(paste0("sig",cluster2), pvalue2)
+ assign(paste0("sig",cluster3), pvalue3)
+ assign(paste0("sig",cluster4), pvalue4)
+
+ assign(paste0("fre",cluster1), wr1y)
+ assign(paste0("fre",cluster2), wr2y)
+ assign(paste0("fre",cluster3), wr3y)
+ assign(paste0("fre",cluster4), wr4y)
+
+ # Visualize the composition with the 3 graphs for all 4 regimes: their pressure fields, their impact on var and their frequency series:
+ if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700)
+
+ plot.new()
+
+ # Sheet title:
+ par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+ mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2)
+
+ #EU <- c(1:41, 130:175) #c(1:54,130:161) # positions of the longitude values of Europe only (without the Atlantic Ocean and America)
+ #EU <- c(1:which(lon >= lon.max)[1], which(lon >= lon.min)[1]:length(lon)) # to show the same area used for calculation of anomalies
+ EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+
+ # Centroid maps:
+ map.xpos <- 0
+ map.width <- 0.46
+ par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+ # Title Centroid Maps:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Impact maps:
+ impact.xpos <- 0.47
+ impact.width <- 0.26
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+ # Frequency plots:
+ barplot.xpos <- 0.75
+ barplot.width <- 0.25
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE)
+ barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE)
+ barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE)
+ barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+ par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE)
+ barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+
+ # % on Frequency plots:
+ symbol.xpos <- 0.735
+ symbol.width <- 0.01
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE)
+ mtext("%", cex=3.3)
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE)
+ mtext("%", cex=3.3)
+
+ # Title 1:
+ title1.xpos <- 0.02
+ title1.width <- 0.4
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.925, 0.930), new=TRUE)
+ mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.690, 0.696), new=TRUE)
+ mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.455, 0.460), new=TRUE)
+ mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.220, 0.225), new=TRUE)
+ mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+ # Title 2:
+ title2.xpos <- 0.42
+ title2.width <- 0.35
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.925, 0.930), new=TRUE)
+ mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.690, 0.696), new=TRUE)
+ mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.455, 0.460), new=TRUE)
+ mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+ par(fig=c(title2.xpos, title2.xpos + title2.width, 0.220, 0.225), new=TRUE)
+ mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+ # Title 3:
+ title3.xpos <- 0.75
+ title3.width <- 0.25
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.925, 0.930), new=TRUE)
+ mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.690, 0.696), new=TRUE)
+ mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.455, 0.460), new=TRUE)
+ mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.220, 0.225), new=TRUE)
+ mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+ # Legend 1:
+ legend1.xpos <- 0.01
+ legend1.width <- 0.44
+ legend1.cex <- 1.8
+ my.subset <- match(values.to.plot, my.brks)
+
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Legend 2:
+ legend2.xpos <- 0.48
+ legend2.width <- 0.25
+ legend2.cex <- 1.8
+ my.subset2 <- match(values.to.plot2, my.brks.var)
+
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+ if(!as.pdf) dev.off() # for saving 4 png
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ## if(period == 13){ # Winter
+ ## cluster2.name="NAO+"
+ ## cluster3.name="NAO-"
+ ## cluster1.name="Blocking"
+ ## cluster4.name="Atl.Ridge"
+ ## }
+ ## if(period == 14){
+ ## cluster1.name="NAO+"
+ ## cluster4.name="NAO-"
+ ## cluster3.name="Blocking"
+ ## cluster2.name="Atl.Ridge"
+ ## }
+ ## if(period == 15){
+ ## cluster1.name="NAO+"
+ ## cluster2.name="NAO-"
+ ## cluster3.name="Blocking"
+ ## cluster4.name="Atl.Ridge"
+ ## }
+ ## if(period == 16){
+ ## cluster1.name="NAO+"
+ ## cluster3.name="NAO-"
+ ## cluster2.name="Blocking"
+ ## cluster4.name="Atl.Ridge"
+ ## }
+
diff --git a/old/weather_regimes_v18 b/old/weather_regimes_v18
new file mode 100644
index 0000000000000000000000000000000000000000..ba7731544900110c8936c64bd2e47906fa8143f5
--- /dev/null
+++ b/old/weather_regimes_v18
@@ -0,0 +1,796 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalyses from which to load the input psl data
+rean.name <- ERAint.name #JRA55.name #ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- rean #forecast # put 'rean' to load pressure fields and var from the reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- rean.name #forecast.name
+
+year.start <- 1979 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+ # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name
+
+lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime.
+
+PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
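+# A short illustration (toy latitudes, not the real grid) of the optional latitude
+# weighting enabled by lat.weighting above: each gridpoint is scaled by sqrt(cos(lat)),
+# so high-latitude points, which represent smaller areas on the sphere, contribute less
+# to the Euclidean distances used by the k-means clustering.
+toy.lat <- c(30, 50, 70) # degrees North
+round(cos(toy.lat*pi/180)^0.5, 3) # weights: 0.931 0.802 0.585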
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") +n.years <- year.end - year.start + 1 + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } + +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +for(period in 13:16){ + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execute the above command + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- c() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years)) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude: + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } else { + pslPeriod.weighted <- pslPeriod + } + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # remove from matpsl the years with NA: + my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly var data in the forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +WR.period <- 13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +# if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +if(WR.period <- 1:12){ + days.period.list <- list() + for (p in 1:16){ + days.period.list[[p]] <- NA + for(y in year.start:year.end) days.period.list[[p]] <- c(days.period.list[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + days.period.list[[p]] <- days.period.list[[p]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + } + + my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() + my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + + ss <- 
match(days.period.list[[13]],days.period.list[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[13]],days.period.list[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[13]],days.period.list[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[14]],days.period.list[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[14]],days.period.list[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[14]],days.period.list[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[15]],days.period.list[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[15]],days.period.list[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[15]],days.period.list[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[16]],days.period.list[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[16]],days.period.list[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[16]],days.period.list[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +} + +for(period in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + period.length <- n.days.in.a.period(period,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + #if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length <- length(my.startdates.period) # number of Thusdays in the period + } + + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + + #pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_spring_climatology_may.pdf"),width=20, height=15) + 
+
+  #pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_spring_climatology_may.pdf"),width=20, height=15)
+  #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+  #for(d in 1:n.days.period){
+  #PlotEquiMap2(rescale(varPeriodAnom[d,,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, toptitle=paste0("Day ",d), filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)
+  #}
+  #dev.off()
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period)
+    for(i in 5:(n.days.period-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else{
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+    } else {
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
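+  # The seq() indexing in the forecast branch above relies on the day ordering of
+  # psleuFull/vareuFull: all years of startdate 1 first, then all years of
+  # startdate 2, and so on, so seq(i, n, n.years) extracts the days of year i.
+  # Tiny illustration (hypothetical 3 years x 2 startdates, not executed):
+  if(FALSE){
+    lab <- paste0("sd", rep(1:2, each=3), "_y", rep(1:3, times=2))
+    lab[seq(2, length(lab), 3)] # -> "sd1_y2" "sd2_y2": the two days of year 2
+  }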
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+  #varPeriodAnom.mean <- apply(varPeriodAnom,c(2,3),mean,na.rm=T)
+
+  #pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_spring_anomalies_NAOp_may.pdf"),width=20, height=15)
+  #for(day in 1:601){
+  #PlotEquiMap2(rescale(varPeriodAnom.mean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)
+  #}
+  #dev.off()
+  #PlotEquiMap2(rescale(varPeriodAnom4mean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)
+
+  #pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_spring_anomalies_may.pdf"),width=20, height=15)
+  #days <-c(); for(y in year.start:year.end) days <- c(days, 92*(y-year.start) + 62:92)
+  #varPeriodAnomMes <- apply(varPeriodAnom[days,,],c(2,3),mean,na.rm=T)
+  #PlotEquiMap2(rescale(varPeriodAnomMes[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, toptitle="May tas anomalies",filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)
+  #dev.off()
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth1, varPeriodAnom1)
+  gc()
+
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth2, varPeriodAnom2)
+  gc()
+
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth3, varPeriodAnom3)
+  gc()
+
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+  rm(varPeriodAnom)
+  gc()
+
+  pslPeriod <- psleuFull[days.period,,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
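+  # The four pvalueN maps above repeat one pattern: stack the full-period
+  # anomalies on top of the within-regime anomalies and run a grid-point-wise
+  # t-test on the two halves. A helper capturing that pattern (illustrative
+  # sketch only, not executed; regime.pvalue is a hypothetical name):
+  if(FALSE){
+    regime.pvalue <- function(anom.all, anom.regime){
+      n.all <- dim(anom.all)[1]
+      both <- abind(anom.all, anom.regime, along = 1)
+      apply(both, c(2,3), function(x) t.test(x[1:n.all], x[(n.all+1):length(x)])$p.value)
+    }
+    pvalue1 <- regime.pvalue(varPeriodAnom, varPeriodAnom1)
+  }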
+
+  # save all the data necessary to redraw the graphs when we know the right regime:
+  save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close the for loop on 'period'
+
+
+# run it twice: once with ordering <- FALSE, to look only at the cartography and find visually the right regime names of the four clusters;
+# and a second time, to save the ordered cartography:
+ordering <- FALSE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+save.names <- TRUE # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+
+# positions of the longitude values covering continental Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # Mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # Mean anomaly of a WR
+  my.brks2 <- my.brks #c(seq(-20,20,2)) # Mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # Mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # Mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # Mean anomaly of a WR
+if(var.name[var.num] == "sfcWind") {
+  my.brks.var <- seq(-3,3,0.5)
+  values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+}
+if(var.name[var.num] == "tas") {
+  my.brks.var <- seq(-5,5,1)
+  values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+}
+
+#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
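+# The plotting calls further below assume the usual contract between breaks and
+# colours: length(cols) == length(brks) - 1. A quick sanity check (added here as
+# an illustrative safeguard, not part of the original script):
+stopifnot(length(my.cols) == length(my.brks) - 1, length(my.cols.var) == length(my.brks.var) - 1)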
+#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60))
+
+for(period in WR.period){
+  load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  if(save.names){
+    if(period == 1){ # January
+      cluster2.name="NAO+"
+      cluster1.name="NAO-"
+      cluster4.name="Blocking"
+      cluster3.name="Atl.Ridge"
+    }
+    if(period == 2){ # February
+      cluster3.name="NAO+"
+      cluster2.name="NAO-"
+      cluster4.name="Blocking"
+      cluster1.name="Atl.Ridge"
+    }
+    if(period == 3){ # March
+      cluster3.name="NAO+"
+      cluster2.name="NAO-"
+      cluster4.name="Blocking"
+      cluster1.name="Atl.Ridge"
+    }
+    if(period == 4){ # April
+      cluster2.name="NAO+"
+      cluster1.name="NAO-"
+      cluster3.name="Blocking"
+      cluster4.name="Atl.Ridge"
+    }
+    if(period == 5){ # May
+      cluster4.name="NAO+"
+      cluster2.name="NAO-"
+      cluster3.name="Blocking"
+      cluster1.name="Atl.Ridge"
+    }
+    if(period == 6){ # June
+      cluster4.name="NAO+"
+      cluster1.name="NAO-"
+      cluster2.name="Blocking"
+      cluster3.name="Atl.Ridge"
+    }
+    if(period == 7){ # July
+      cluster4.name="NAO+"
+      cluster2.name="NAO-"
+      cluster1.name="Blocking"
+      cluster3.name="Atl.Ridge"
+    }
+    if(period == 8){ # August
+      cluster2.name="NAO+"
+      cluster4.name="NAO-"
+      cluster3.name="Blocking"
+      cluster1.name="Atl.Ridge"
+    }
+    if(period == 9){ # September
+      cluster4.name="NAO+"
+      cluster3.name="NAO-"
+      cluster1.name="Blocking"
+      cluster2.name="Atl.Ridge"
+    }
+    if(period == 10){ # October
+      cluster1.name="NAO+"
+      cluster4.name="NAO-"
+      cluster2.name="Blocking"
+      cluster3.name="Atl.Ridge"
+    }
+    if(period == 11){ # November
+      cluster2.name="NAO+"
+      cluster3.name="NAO-"
+      cluster1.name="Blocking"
+      cluster4.name="Atl.Ridge"
+    }
+    if(period == 12){ # December
+      cluster2.name="NAO+"
+      cluster4.name="NAO-"
+      cluster1.name="Blocking"
+      cluster3.name="Atl.Ridge"
+    }
+    if(period == 13){ # Winter
+      cluster3.name="NAO+"
+      cluster4.name="NAO-"
+      cluster1.name="Blocking"
+      cluster2.name="Atl.Ridge"
+    }
+    if(period == 14){ # Spring
+      cluster1.name="NAO+"
+      cluster3.name="NAO-"
+      cluster2.name="Blocking"
+      cluster4.name="Atl.Ridge"
+    }
+    if(period == 15){ # Summer
+      cluster2.name="NAO+"
+      cluster3.name="NAO-"
+      cluster4.name="Blocking"
+      cluster1.name="Atl.Ridge"
+    }
+    if(period == 16){ # Autumn
+      cluster4.name="NAO+"
+      cluster1.name="NAO-"
+      cluster2.name="Blocking"
+      cluster3.name="Atl.Ridge"
+    }
+
+    cluster1.name.period[period] <- cluster1.name
+    cluster2.name.period[period] <- cluster2.name
+    cluster3.name.period[period] <- cluster3.name
+    cluster4.name.period[period] <- cluster4.name
+
+    save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+
+  } else { # in this case, we load the cluster names from the file already saved:
+    load(paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+    cluster1.name <- cluster1.name.period[period]
+    cluster2.name <- cluster2.name.period[period]
+    cluster3.name <- cluster3.name.period[period]
+    cluster4.name <- cluster4.name.period[period]
+  }
+
+  # assign to each cluster its regime:
+  if(ordering == FALSE){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("imp",cluster1), varPeriodAnom1mean)
+  assign(paste0("imp",cluster2), varPeriodAnom2mean)
+  assign(paste0("imp",cluster3), varPeriodAnom3mean)
+  assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+  assign(paste0("sig",cluster1), pvalue1)
+  assign(paste0("sig",cluster2), pvalue2)
+  assign(paste0("sig",cluster3), pvalue3)
+  assign(paste0("sig",cluster4), pvalue4)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series:
+  if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700)
+
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+  mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2)
+
+  # Centroid maps:
+  map.xpos <- 0
+  map.width <- 0.46
+  par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+  PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+  PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+  PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+  PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+  # Title Centroid Maps:
+  map.title.xpos <- 0.76
+  map.title.width <- 0.2
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+  mtext("year", cex=3)
+
+  # Impact maps:
+  impact.xpos <- 0.47
+  impact.width <- 0.26
+  par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+  PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+  par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+  PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+  par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+  PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+  par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+  PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+
+  # Frequency plots:
+  barplot.xpos <- 0.75
+  barplot.width <- 0.25
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE)
+  barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE)
+  barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE)
+  barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE)
+  barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+
+  # % on Frequency plots:
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  # Title 1:
+  title1.xpos <- 0.02
+  title1.width <- 0.4
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+  # Title 2:
+  title2.xpos <- 0.42
+  title2.width <- 0.35
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+  # Title 3:
+  title3.xpos <- 0.75
+  title3.width <- 0.25
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+  # Legend 1:
+  legend1.xpos <- 0.01
+  legend1.width <- 0.44
+  legend1.cex <- 1.8
+  my.subset <- match(values.to.plot, my.brks)
+
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off() # for saving 4 png
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v18~ b/old/weather_regimes_v18~
new file mode 100644
index 0000000000000000000000000000000000000000..a31bd20162154227a4da0933eb60929a862c3687
--- /dev/null
+++ b/old/weather_regimes_v18~
@@ -0,0 +1,680 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+rean.name <- ERAint.name #JRA55.name #ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- rean.name #forecast.name
+
+year.start <- 1979 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                     # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
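+# Note on the longitude selection performed below: since the grid stores
+# longitudes in [0, 360), a North Atlantic box crossing Greenwich is assembled
+# from two index ranges, c(1:pos.lon.max, pos.lon.min:n.lon). Minimal
+# illustration (hypothetical 1-degree axis, illustrative only, not executed):
+if(FALSE){
+  lons <- 0:359
+  pos.lon <- c(which(lons <= 45), which(lons >= 274.5)) # 0-45E plus 274.5-359E, wrapping across 0E
+}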
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+n.years <- year.end - year.start + 1
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area (like lat.min/lat.max, but their values correspond to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period
+
+    days.period <- c() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+  }
+
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e. winter)
+
+  # weight the pressure fields by latitude:
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+    pslPeriod.weighted <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  } else {
+    pslPeriod.weighted <- pslPeriod
+  }
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance
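+    # Worked example of the PC-selection rule applied on the next line
+    # (hypothetical numbers, illustrative only, not executed): if the cumulative
+    # explained variance is c(0.55, 0.72, 0.81, 0.88) and variance.explained = 0.8,
+    # then head(which(cum.var > 0.8), 1) gives 3, so the first 3 PCs are kept.
+    if(FALSE){
+      cum.var <- c(0.55, 0.72, 0.81, 0.88)
+      head(which(cum.var > 0.8), 1) # -> 3
+    }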
+    n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance
+    my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # remove from pslmat the years with NA:
+    my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+var.num <- 2 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly var data in the forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+    days.period <- NA
+    for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+    days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+    period.length <- n.days.in.a.period(period,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- c()
+    for(startdate in my.startdates.period){
+      days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period)
+    for(i in 5:(n.days.period-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else{
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  #wr1 <- which(my.cluster[[period]]$cluster==1)
+  #wr2 <- which(my.cluster[[period]]$cluster==2)
+  #wr3 <- which(my.cluster[[period]]$cluster==3)
+  #wr4 <- which(my.cluster[[period]]$cluster==4)
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+    } else {
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth1, varPeriodAnom1)
+  gc()
+
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth2, varPeriodAnom2)
+  gc()
+
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth3, varPeriodAnom3)
+  gc()
+
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+  rm(varPeriodAnom)
+  gc()
+
+  pslPeriod <- psleuFull[days.period,,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # save all the data necessary to redraw the graphs when we know the right regime:
+  save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close the for loop on 'period'
+
+
+# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters;
+# and the second time, to save the ordered cartography:
+ordering <- FALSE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
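+# Illustrative summary of the two-pass usage described above (assumed workflow):
+# pass 1: ordering <- FALSE; save.names <- TRUE -> draw the unordered maps and identify each cluster's regime visually;
+# pass 2: ordering <- TRUE -> redraw with the clusters arranged as in 'orden' (names re-entered below or loaded from the saved _ClusterNames.RData file).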
+save.names <- TRUE # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- TRUE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+
+# positions of the longitude values covering continental Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # Mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # Mean anomaly of a WR
+  my.brks2 <- my.brks #c(seq(-20,20,2)) # Mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # Mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # Mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # Mean anomaly of a WR
+if(var.name[var.num] == "sfcWind") {
+  my.brks.var <- seq(-3,3,0.5)
+  values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+}
+if(var.name[var.num] == "tas") {
+  my.brks.var <- seq(-5,5,1)
+  values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+}
+
+#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60))
+
+for(period in 13:16){
+  load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  if(save.names){
+    if(period == 13){ # Winter
+      cluster3.name="NAO+"
+      cluster4.name="NAO-"
+      cluster1.name="Blocking"
+      cluster2.name="Atl.Ridge"
+    }
+    if(period == 14){ # Spring
+      cluster1.name="NAO+"
+      cluster3.name="NAO-"
+      cluster2.name="Blocking"
+      cluster4.name="Atl.Ridge"
+    }
+    if(period == 15){ # Summer
+      cluster2.name="NAO+"
+      cluster3.name="NAO-"
+      cluster4.name="Blocking"
+      cluster1.name="Atl.Ridge"
+    }
+    if(period == 16){ # Autumn
+      cluster4.name="NAO+"
+      cluster1.name="NAO-"
+      cluster2.name="Blocking"
+      cluster3.name="Atl.Ridge"
+    }
+
+    cluster1.name.period[period] <- cluster1.name
+    cluster2.name.period[period] <- cluster2.name
+    cluster3.name.period[period] <- cluster3.name
+    cluster4.name.period[period] <- cluster4.name
+
+    save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+
+  } else { # in this case, we load the cluster names from the file already saved:
+    load(paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+    cluster1.name <- cluster1.name.period[period]
+    cluster2.name <- cluster2.name.period[period]
+    cluster3.name <- cluster3.name.period[period]
+    cluster4.name <- cluster4.name.period[period]
+  }
+
+  # assign to each cluster its regime:
+  if(ordering == FALSE){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("imp",cluster1), varPeriodAnom1mean)
+  assign(paste0("imp",cluster2), varPeriodAnom2mean)
+  assign(paste0("imp",cluster3), varPeriodAnom3mean)
+  assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+  assign(paste0("sig",cluster1), pvalue1)
+  assign(paste0("sig",cluster2), pvalue2)
+  assign(paste0("sig",cluster3), pvalue3)
+  assign(paste0("sig",cluster4), pvalue4)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series:
+  if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700)
+
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", 
+  barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE)
+  barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE)
+  barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0))
+
+  # % on Frequency plots:
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  # Title 1:
+  title1.xpos <- 0.02
+  title1.width <- 0.4
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+  # Title 2:
+  title2.xpos <- 0.42
+  title2.width <- 0.35
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+  # Title 3:
+  title3.xpos <- 0.75
+  title3.width <- 0.25
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+  # Legend 1:
+  legend1.xpos <- 0.01
+  legend1.width <- 0.44
+  legend1.cex <- 1.8
+  my.subset <- match(values.to.plot, my.brks)
+
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)} + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + diff --git a/old/weather_regimes_v19.R b/old/weather_regimes_v19.R new file mode 100644 index 0000000000000000000000000000000000000000..81c0f2799631320729df9b8bd691042aa91a2ea9 --- /dev/null +++ b/old/weather_regimes_v19.R @@ -0,0 +1,774 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint.name <- "ERA-Interim" + +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data +rean.name <- ERAint.name #JRA55.name #ERAint.name + +# available forecast systems for the var data: +ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_monthly +forecast.name <- ECMWF_monthly.name + +fields <- rean #forecast # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems +fields.name <- rean.name #forecast.name + +year.start <- 1979 #1994 #1979 #1981 +year.end <- 2013 #2010 + +leadtime <- 1:7 #5:11 #1 
# if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name + +lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime. + +PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude! + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +if(lat.min >= lat.max) stop("lat.min cannot be greater than or equal to lat.max") +n.years <- year.end - year.start + 1 + +days.period.list <- n.days.period.list <- period.length.list <- list() +for (p in 1:17){ + days.period.list[[p]] <- NA + for(y in year.start:year.end) days.period.list[[p]] <- c(days.period.list[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + days.period.list[[p]] <- days.period.list[[p]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period.list[[p]] <- length(days.period.list[[p]]) + # Number of days belonging to that period in a single year: + period.length.list[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years +} + + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select 
the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } + +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa +gc() + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +for(period in 13:16){ + if(unlist(fields) == unlist(rean)){ + days.period <- days.period.list[[period]] # days of the chosen period, taken from the precomputed list + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- c() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years)) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + if(lat.weighting){ + 
lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } else { + pslPeriod.weighted <- pslPeriod + } + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that together explain at least 80% of the variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # remove from pslmat the rows (days) with NA: + my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly var data in the forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + 
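+# ---------------------------------------------------------------------------------------
+# Illustrative sketch (added comment, not part of the original script): the clustering
+# pipeline used above in miniature, on synthetic data. The array sizes and the 80%
+# threshold below are toy assumptions chosen to mirror the settings of this script.
+# psl.toy <- array(rnorm(360*10*20), c(360, 10, 20))   # 360 'days' on a toy 10x20 grid
+# psl.mat <- psl.toy; dim(psl.mat) <- c(360, 10*20)    # flatten space: days x grid points
+# pca.toy <- princomp(psl.mat, cor=FALSE)              # EOFs of the daily pressure fields
+# expl <- cumsum(pca.toy$sdev^2/sum(pca.toy$sdev^2))   # cumulative explained variance
+# n.pc <- which(expl > 0.8)[1]                         # keep the PCs explaining >= 80%
+# cl.toy <- kmeans(pca.toy$scores[,1:n.pc], centers=4, iter.max=100, nstart=30)
+# table(cl.toy$cluster)                                # daily regime assignments (1..4)
+# ---------------------------------------------------------------------------------------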
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +# if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +if(WR.period <- 1:12){ + my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() + my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + + ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +} + +for(period in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + #if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length <- length(my.startdates.period) # number of Thusdays in the period + } + + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, 
varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + + if(sequences){ + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period) + for(i in 5:(n.days.period-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + rm(wrdiff, sequ) + } else{ + cluster.sequence <- my.cluster[[period]]$cluster + } + + gc() + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } else { + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean 
<- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
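+# ---------------------------------------------------------------------------------------
+# Illustrative sketch (added comment, not part of the original script): the per-gridpoint
+# significance test computed above, in miniature. For every grid point, the var anomalies
+# of the days assigned to one regime are compared with the anomalies of all days of the
+# period through a (Welch) t-test; all names and sizes below are toy assumptions.
+# library(abind)
+# anom.toy <- array(rnorm(100*5*8), c(100, 5, 8))   # 100 days on a toy 5x8 grid
+# days.regime <- sample(1:100, 30)                  # toy days assigned to one regime
+# both.toy <- abind(anom.toy, anom.toy[days.regime,,], along=1)
+# pval.toy <- apply(both.toy, c(2,3), function(x) t.test(x[1:100], x[101:length(x)])$p.value)
+# sum(pval.toy < 0.05)                              # grid points with a significant impact
+# ---------------------------------------------------------------------------------------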
+save.names <- FALSE # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of the lon values covering Europe only (without the Atlantic Ocean and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60) + +for(period in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 4){ # April + cluster2.name="NAO+" + 
cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency 
series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, 
cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'period' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + diff --git a/old/weather_regimes_v19.R~ b/old/weather_regimes_v19.R~ new file mode 100644 index 0000000000000000000000000000000000000000..fb6bb6c2e29578628b982d3bc8b1fc8ccb539bbd --- /dev/null +++ b/old/weather_regimes_v19.R~ @@ -0,0 +1,782 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(RColorBrewer) # for the function brewer.pal() +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint.name <- "ERA-Interim" + +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalyses from where to load the input psl data +rean.name <- ERAint.name #JRA55.name #ERAint.name + +# available forecast systems for the var data: +ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- 
ECMWF_monthly +forecast.name <- ECMWF_monthly.name + +fields <- rean #forecast # put 'rean' to load pressure fields and var from the reanalysis, put 'forecast' to load them from forecast systems +fields.name <- rean.name #forecast.name + +year.start <- 1979 #1994 #1979 #1981 +year.end <- 2013 #2010 + +leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name + +lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime. + +PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
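+# Note (illustration, not part of the original script): since lon.min > lon.max, the study
+# box crosses the Greenwich meridian, so further below the longitude positions are built by
+# joining the two ends of the 0-360 axis. Toy example on an 8-point grid with the same
+# lon.min/lon.max settings:
+# lon.toy <- seq(0, 315, by=45)                   # 0 45 90 135 180 225 270 315
+# pos.max.toy <- tail(which(45 - lon.toy >= 0),1) # last point with lon <= lon.max=45 -> 2
+# pos.min.toy <- which(lon.toy - 274.5 >= 0)[1]   # first point with lon >= lon.min -> 8
+# c(1:pos.max.toy, pos.min.toy:length(lon.toy))   # -> 1 2 8, i.e. 0, 45 and 315 degrees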
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +if(lat.min >= lat.max) stop("lat.min cannot be greater than or equal to lat.max") +n.years <- year.end - year.start + 1 + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly psl data in the forecast case: + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } + +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa +gc() + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +for(period in 13:16){ + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- c() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*n.years):(startdate*n.years)) + } + } + + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } else { + pslPeriod.weighted <- pslPeriod + } + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl. 
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that together explain at least 80% of the variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # remove from pslmat the rows (days) with NA: + my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + } + + rm(pslmat) + gc() +} + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +} else { # Load 6-hourly var data in the forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +# if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +if(identical(WR.period, 1:12)){ + days.period.list <- list() + for (p in 1:16){ + days.period.list[[p]] <- NA + for(y in year.start:year.end) days.period.list[[p]] <- c(days.period.list[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + days.period.list[[p]] <- days.period.list[[p]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + } + + my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() + my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + + ss <- 
match(days.period.list[[13]],days.period.list[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[13]],days.period.list[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[13]],days.period.list[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[14]],days.period.list[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[14]],days.period.list[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[14]],days.period.list[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[15]],days.period.list[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[15]],days.period.list[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[15]],days.period.list[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[16]],days.period.list[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[16]],days.period.list[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] + ss <- match(days.period.list[[16]],days.period.list[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +} + +for(period in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + period.length <- n.days.in.a.period(period,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + #if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- c() + for(startdate in my.startdates.period){ + days.period <- c(days.period, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length <- length(my.startdates.period) # number of Thursdays in the period + } + + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + + #pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_spring_climatology_may.pdf"),width=20, height=15) + 
+
+  #pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_spring_climatology_may.pdf"),width=20, height=15)
+  #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+  #for(d in 1:n.days.period){
+  #  PlotEquiMap2(rescale(varPeriodAnom[d,,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, toptitle=paste0("Day ",d), filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)
+  #}
+  #dev.off()
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period)
+    for(i in 5:(n.days.period-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else{
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+    } else {
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- 
varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
+save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 4){ # April + cluster2.name="NAO+" + 
cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency 
series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, 
cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off() # for saving one .png file per period
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
diff --git a/old/weather_regimes_v2.R b/old/weather_regimes_v2.R
new file mode 100644
index 0000000000000000000000000000000000000000..41baa898f7a03c827eaf359c0fc59f27da4f7f8b
--- /dev/null
+++ b/old/weather_regimes_v2.R
@@ -0,0 +1,187 @@
+
+library(s2dverification) # for the function Load()
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+# available reanalyses:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps
+
+year.start <- 1979
+year.end <- 2013
+
+rean <- ERAint # choose one of the two reanalyses above
+
+var.name <- "tas" #"sfcWind" # name of the 'predictand' variable of the chosen reanalysis
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because Z500 has only positive values of longitude!
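+# The lon-index selection further down assumes the study box crosses the Greenwich
+# meridian. A hedged sketch of a more general helper (hypothetical, not used by this
+# script) that also handles boxes that do not cross 0 degrees:
+if(FALSE){
+  select.lon.box <- function(lon, lon.min, lon.max){
+    # lon, lon.min and lon.max in [0,360); returns the indices of 'lon' inside the box
+    if(lon.min <= lon.max) return(which(lon >= lon.min & lon <= lon.max))
+    c(which(lon >= lon.min), which(lon <= lon.max)) # box wrapping around 0 degrees
+  }
+  select.lon.box(seq(0, 359.5, 0.5), 280, 40) # roughly the same points as the pos.lon
+                                              # line below, although ordered from lon.min
+}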
+
+#############################################################################################################################
+
+# load only 1 day of z500 to detect the minimum and maximum lat and lon values to identify only the North Atlantic
+domain <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+z500euFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat')
+  z500eu <- var$obs[,,,,pos.lat,pos.lon]
+  z500euFull[seq.days.in.a.future.year(year.start, y),,] <- z500eu
+  rm(z500eu)
+  gc()
+}
+
+lat <- var$lat[pos.lat]
+lon <- var$lon[pos.lon]
+
+period = 13 # (winter)
+
+days.period <- NA
+for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+n.days.period <- length(days.period)
+
+z500 <- z500euFull[days.period,,] # select only days in the chosen period (i.e: winter)
+
+z500mat <- z500
+
+dim(z500mat) <- c(head(dim(z500mat),1), n.pos.lat*n.pos.lon) # convert the array into a matrix!
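+# The next statements subsample 1 grid point out of 9, run a PCA and feed the leading
+# PC scores to k-means. A self-contained toy (dummy matrix, hypothetical sizes) showing
+# what they compute:
+if(FALSE){
+  m <- matrix(rnorm(300*50), 300, 50)            # dummy: 300 days x 50 grid points
+  pca <- princomp(m, cor=FALSE)
+  expl.var <- cumsum(pca$sdev^2/sum(pca$sdev^2)) # cumulative explained variance
+  n.keep <- which(expl.var > 0.8)[1]             # smallest number of PCs explaining >80%
+  cl <- kmeans(pca$scores[,1:n.keep], centers=4, iter.max=100, nstart=30)
+  table(cl$cluster)                              # how many days fall in each cluster
+}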
+
+my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of 9
+
+z500cut <- z500mat[,my.seq]
+
+my.PCA <- princomp(z500cut,cor=FALSE)
+
+head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),20) # check how many PCs to keep based on the sum of their explained variance
+
+my.cluster <- kmeans(my.PCA$scores[,1:7], 4) # 4 is the number of clusters; 7 is the number of EOFs, which in our case explain 79% of the variance
+
+rm(z500euFull,z500eu,z500mat,var)
+gc()
+
+# Load wind data:
+
+vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat')
+  vareu <- var$obs[,,,,pos.lat,pos.lon]
+  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- vareu
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean,"_",var.name,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",rean,"_",var.name,".RData"))
+
+varPeriod <- vareuFull[days.period,,]
+
+varPeriodClim <- apply(varPeriod,c(2,3),mean,na.rm=T)
+varPeriodClim2 <- InsertDim(varPeriodClim,1,n.days.period)
+
+varPeriodAnom <- varPeriod - varPeriodClim2
+
+wr1 <- which(my.cluster$cluster==1)
+wr2 <- which(my.cluster$cluster==2)
+wr3 <- which(my.cluster$cluster==3)
+wr4 <- which(my.cluster$cluster==4)
+
+varPeriodAnom1 <- varPeriodAnom[wr1,,]
+varPeriodAnom2 <- varPeriodAnom[wr2,,]
+varPeriodAnom3 <- varPeriodAnom[wr3,,]
+varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+z500wr1 <- z500[wr1,,]
+z500wr2 <- z500[wr2,,]
+z500wr3 <- z500[wr3,,]
+z500wr4 <- z500[wr4,,]
+
+z500wr1mean <- apply(z500wr1,c(2,3),mean,na.rm=T)
+z500wr2mean <- apply(z500wr2,c(2,3),mean,na.rm=T)
+z500wr3mean <- apply(z500wr3,c(2,3),mean,na.rm=T)
+z500wr4mean <- apply(z500wr4,c(2,3),mean,na.rm=T)
+
+# Mean z500 maps:
+
+my.brks <- c(seq(48800,57100,1)) # breaks for the mean z500 field of each WR
+my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+
+PlotEquiMap(z500wr1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # NAO-
+PlotEquiMap(z500wr2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # blocking
+PlotEquiMap(z500wr3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # atlantic ridge
+PlotEquiMap(z500wr4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # NAO+
+
+
+# Average wind anomalies:
+
+my.brks <- c(seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1)) # breaks for the mean anomaly of each WR
+my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+
+png(filename=paste0(mapdir,"/",rean,"_",var.name,"_",my.period[period],"NAO-.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Blocking.png"), width=1000, height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, 
brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Atlantic Ridge.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO+.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+
+# save as .pdf instead of .png:
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO-.pdf"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Blocking.pdf"), width=1000, height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Atlantic Ridge.pdf"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO+.pdf"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+rm(varPeriodeuFull,varPeriodeu,varPeriod,var)
diff --git a/old/weather_regimes_v20.R b/old/weather_regimes_v20.R
new file mode 100644
index 0000000000000000000000000000000000000000..aa68d6d1a6a67816e1afc409f4d2f7738571dc57
--- /dev/null
+++ b/old/weather_regimes_v20.R
@@ -0,0 +1,772 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from where to load the input psl data
+rean.name <- ERAint.name #JRA55.name #ERAint.name
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = 
paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
+fields <- rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+fields.name <- rean.name #forecast.name
+
+year.start <- 1979 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                     # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
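+# When lat.weighting is TRUE, the clustering section further down multiplies each daily
+# psl field by sqrt(cos(latitude)), so that high-latitude points (with smaller grid cells)
+# weigh less. A base-R sketch of that weight array on toy dimensions (the script itself
+# builds it with InsertDim from s2dverification):
+if(FALSE){
+  lat.toy <- seq(27, 81, 3); n.lon.toy <- 10; n.days.toy <- 5
+  w <- sqrt(cos(lat.toy*pi/180))         # one weight per latitude
+  w.field <- outer(w, rep(1, n.lon.toy)) # (lat x lon) weight field
+  w.array <- aperm(array(w.field, c(length(lat.toy), n.lon.toy, n.days.toy)), c(3,1,2))
+  # w.array has dims (day, lat, lon): multiply it element-wise with the psl array
+}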
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+n.years <- year.end - year.start + 1
+
+days.period <- n.days.period <- period.length <- list()
+for (p in 1:17){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[p]] <- length(days.period[[p]])
+  # Number of days belonging to that period in a single year:
+  period.length[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){
+  if(unlist(fields) == unlist(rean)){
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period
+
+    days.period[[period]] <- c() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[period]] <- c(days.period[[period]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+    n.days.period[[period]] <- length(days.period[[period]])
+  }
+
+
+  pslPeriod <- psleuFull[days.period[[period]],,] # select only days in the chosen period (i.e: winter)
+
+  # weight the pressure fields based on latitude:
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[period]])
+    pslPeriod.weighted <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  } else {
+    pslPeriod.weighted <- pslPeriod
+  }
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period[[period]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance
+    n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance
+    my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # remove from pslmat the years with NA:
+    my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
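+# In the forecast case, the first dimension of psleuFull interleaves the years within
+# each startdate (see the "daily order" comment above). A quick sketch of the index
+# arithmetic used throughout this script, on toy sizes (all names hypothetical):
+if(FALSE){
+  n.years.toy <- 20  # e.g. 20 hindcast years
+  startdate.toy <- 2 # second weekly startdate
+  (1+(startdate.toy-1)*n.years.toy):(startdate.toy*n.years.toy) # rows 21:40: startdate 2, all years
+  seq(3, 5*n.years.toy, n.years.toy) # 3rd year across 5 startdates (as in the wr1y lines below)
+}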
+var.num <- 2 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly var data in the forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+# if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WR time series to the months from 1 to 12:
+if(identical(WR.period, 1:12)){
+  my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+  my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+  ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+  ss <- 
match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+  ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+}
+
+for(period in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    #if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period[[period]] <- c()
+    for(startdate in my.startdates.period){
+      days.period[[period]] <- c(days.period[[period]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    n.days.period[[period]] <- length(days.period[[period]])
+    period.length[[period]] <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  varPeriod <- vareuFull[days.period[[period]],,] # select only var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[period]])
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[period]])
+    for(i in 5:(n.days.period[[period]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else{
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
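+  # A more compact equivalent of the >=5-day filter above is run-length encoding: keep a
+  # day only if it sits inside a run of at least 5 identical cluster numbers. A hedged
+  # base-R sketch (not what this script runs; unlike the loop above, it also keeps long
+  # runs touching the ends of the series):
+  if(FALSE){
+    x <- my.cluster[[period]]$cluster
+    r <- rle(x)                                   # lengths and values of consecutive runs
+    in.long.run <- rep(r$lengths >= 5, r$lengths) # TRUE for days inside runs of >= 5 days
+    cluster.sequence.alt <- x * in.long.run       # 0 for days outside long runs
+  }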
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+    } else {
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth1, varPeriodAnom1)
+  gc()
+
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth2, varPeriodAnom2)
+  gc()
+
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth3, varPeriodAnom3)
+  gc()
+
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+  rm(varPeriodAnom)
+  gc()
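+  # NOTE: the p-values above come from Welch's two-sample t-test (the default of t.test()),
+  # comparing at each grid point all the days of the period against the days assigned to one
+  # regime; conceptually, for grid point (i,j) and regime 1 this is equivalent to
+  # t.test(varPeriodAnom[, i, j], varPeriodAnom[wr1, i, j])$p.value (illustrative indices;
+  # the abind()/apply() calls above just vectorise this test over the whole lon/lat grid).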
+  pslPeriod <- psleuFull[days.period[[p]],,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # save all the data necessary to redraw the graphs once we know the right regime of each cluster:
+  save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close the for loop on 'period'
+
+
+# run it twice: first with ordering <- FALSE, to look at the cartography and identify visually the right regime names of the four clusters;
+# then a second time, to save the ordered cartography:
+ordering <- TRUE  # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+save.names <- FALSE  # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- FALSE  # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+
+# position of the lon values of Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300)  # % Mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300)  # % Mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200)  # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2))  # % Mean anomaly of a WR
+  my.brks2 <- my.brks #c(seq(-20,20,2))  # % Mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1)  # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR
+if(var.name[var.num] == "sfcWind") {
+  my.brks.var <- seq(-3,3,0.5)
+  values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+}
+if(var.name[var.num] == "tas") {
+ 
my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to 
each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), 
lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos 
+ title2.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+
+  # Title 3:
+  title3.xpos <- 0.75
+  title3.width <-0.25
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+  # Legend 1:
+  legend1.xpos <- 0.01
+  legend1.width <- 0.44
+  legend1.cex <- 1.8
+  my.subset <- match(values.to.plot, my.brks)
+
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE)  # syntax fig=c(xmin, xmax, ymin, ymax)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off()  # to save one .png file per season
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off()  # to save a single .pdf with all seasons
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v21.R b/old/weather_regimes_v21.R
new file mode 100644
index 0000000000000000000000000000000000000000..06d6af34ad281f05b9b54f2ad059fc4db143dd34
--- /dev/null
+++ b/old/weather_regimes_v21.R
@@ -0,0 
+1,815 @@
+library(s2dverification)  # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')  # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes"  # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint  # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'))
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast  # set to 'rean' to load pressure fields and var from reanalyses, or to 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500"  # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP"  # "Z500"  # the name to show in the maps
+
+year.start <- 1981 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1  # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+startdate.shift <- 1  # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1  # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1  # if fields=forecast, set the month of the first startdate of the forecast year
+day=2  # if fields=forecast, set the day of the first startdate of the forecast year
+
+lat.weighting=FALSE  # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE  # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+PCA <- FALSE  # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8  # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280  # put a positive number here because the geopotential has only positive values of longitude! 
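+# NOTE: illustrative sketch of the latitude weighting switched on by 'lat.weighting' (toy
+# latitudes assumed, not values from this script): grid-cell area shrinks towards the poles
+# roughly as cos(lat), so each daily field is multiplied by cos(lat)^0.5 before k-means:
+# round(cos(c(30, 50, 70) * pi/180)^0.5, 2)  # 0.93 0.80 0.58: high latitudes weigh less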
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(unlist(rean) == unlist(ERAint), ERAint.name, JRA55.name))
+#forecast.name <- ECMWF_monthly.name
+forecast.name <- unname(ifelse(unlist(forecast) == unlist(ECMWF_S4), ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(unlist(fields) == unlist(rean), rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields  # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (p in 1:17){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+  days.period[[p]] <- days.period[[p]][-1]  # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[p]] <- length(days.period[[p]])
+  # Number of days belonging to that period in a single year:
+  period.length[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0)  # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(unlist(fields) == unlist(rean)) domain <- Load(var = psl, exp = NULL, obs = list(fields), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_S4)) domain <- Load(var = "psl", exp = list(fields), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member="number"), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_monthly)) domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1]  # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1)  # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon))  # beware that it only considers the case where the study area crosses the meridian of Greenwich! 
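+# NOTE: worked example of the Greenwich-crossing selection above, assuming a 1.5-degree grid
+# with lon = seq(0, 358.5, by = 1.5) (240 points): pos.lon.max = 31 (lon = 45) and
+# pos.lon.min = 184 (lon = 274.5), so pos.lon = c(1:31, 184:240) keeps 31 + 57 = 88 columns,
+# i.e. 0-45E followed by 274.5-358.5E; a domain not crossing Greenwich would instead need
+# the single range pos.lon.min:pos.lon.max.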
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat]  # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon]  # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min]  # min and max lat/lon of the chosen area only (like lat.min/lat.max above, but corresponding to actual values in the lat/lon vectors)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){  # Load daily psl data in the reanalysis case:
+  psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(unlist(fields) == unlist(ECMWF_S4)){  # in this case, we can load the data for only one month at a time (since each month needs ~3 GB of memory)
+  start.month <- 12
+  lead.month <- 1
+
+  n.members <- 15
+
+  start.month.char <- ifelse(start.month <= 9, paste0("0", start.month), as.character(start.month))
+
+  #n.days.in.a.month(start.month + lead.month -1 , year.start)
+  chosen.month <- start.month + lead.month - 1  # find which month we want to load data for
+  if(chosen.month > 12) chosen.month <- chosen.month - 12
+  leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start)
+  leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1
+  n.leadtimes <- leadtime.max - leadtime.min + 1
+  #print(paste(leadtime.min, leadtime.max))
+
+  psleuFull <- Load(var = "psl", exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+}
+
+if(unlist(fields) == unlist(ECMWF_monthly)){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
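+  # NOTE: with this startdate-major layout, the row of startdate s (1..n.sdates) and
+  # hindcast year y is (s-1)*n.years + (y - year.start + 1); the filling loop below and
+  # the seq(..., by = n.years) frequency counts further down both rely on this ordering.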
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+
+if(psl=="g500") psleuFull <- psleuFull/9.81  # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100  # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+for(period in WR.period){
+  if(unlist(fields) == unlist(rean)) pslPeriod <- psleuFull[days.period[[p]],,]  # select only days in the chosen period (i.e: winter)
+
+  # in the case of S4 we don't need to select anything because we already loaded only one month of data
+
+  if(unlist(fields) == unlist(ECMWF_monthly)){
+    my.startdates.period <- months.period(forecast.year,mes,day,period)  # get which startdates belong to the chosen period
+
+    days.period <- list()  # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+  }
+
+  # weight the pressure fields by latitude:
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+    pslmat <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  if(unlist(fields) == unlist(rean)){
+    pslmat <- pslPeriod
+    dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon)  # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  if(unlist(fields) == unlist(ECMWF_S4)){
+    pslmat <- psleuFull$mod
+    dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon)  # convert the array into a matrix! Beware that it always puts the years (33) before the leadtimes!
+    gc()
+  }
+  # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){  # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9)  # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50)  # check how many PCs to keep based on the sum of their expl. 
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(unlist(fields) == unlist(rean)) my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) + if(unlist(fields) == unlist(ECMWF_S4)) my.cluster[[period]] <- kmeans(pslmat[1,,], centers=4, iter.max=100, nstart=30) + } + + rm(pslmat) + gc() +} + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +# in the S4 case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!! +#if(unlist(fields) == unlist(ECMWF_S4)) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(unlist(fields) == unlist(ECMWF_monthly)){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(WR.period <- 
1:12){
+## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(period in WR.period){  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(rean)){
+
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    #if(period == 13) my.startdates.period <- my.startdates.period[-2]  # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period)  # number of Thursdays in the period
+  }
+
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[period]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    }  # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[period]]$cluster * sequ
+    rm(wrdiff, sequ)
+  } else{
+    cluster.sequence <- my.cluster[[period]]$cluster
+  }
+
+  gc()
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    } else {
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length[[p]]
+  wr2y <- wr2y/period.length[[p]]
+  wr3y <- wr3y/period.length[[p]]
+  wr4y <- wr4y/period.length[[p]]
+
+  pslPeriod <- psleuFull[days.period[[p]],,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # similar selection, but for var instead of psl:
+
+  varPeriod <- vareuFull[days.period[[p]],,]  # select only var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)  # plot the 
climatology of the period + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
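+# NOTE: illustrative example of the reordering below: if for some month cluster 2 was
+# identified as "NAO+", then which(orden == "NAO+") returns 1, so assign(paste0("map",1), pslwr2mean)
+# draws its centroid in the first row of the composition; with ordering <- FALSE the four
+# clusters are simply drawn in their original 1-4 order.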
+save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 4){ # April + cluster2.name="NAO+" + 
cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency 
series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, 
cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Legend 2:
+ legend2.xpos <- 0.48
+ legend2.width <- 0.25
+ legend2.cex <- 1.8
+ my.subset2 <- match(values.to.plot2, my.brks.var)
+
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+ if(!as.pdf) dev.off() # close the png device (one file per period)
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v21.R~ b/old/weather_regimes_v21.R~
new file mode 100644
index 0000000000000000000000000000000000000000..8bc91c8ba229c91a89fd5f52598daf2445e06dd0
--- /dev/null
+++ b/old/weather_regimes_v21.R~
@@ -0,0 +1,773 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the var data:
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_monthly
+forecast.name <- ECMWF_monthly.name
+
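+# Illustrative note (an assumption about Load()'s path templating, not executed here):
+# Load() should expand the $...$ placeholders in the paths above by itself; e.g. with
+# storefreq = 'daily', the ERAint template would resolve, for psl in January 1979, to
+# something like /esnas/reconstructions/ecmwf/eraint/daily_mean/psl_f6h/psl_197901.nc
+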
+fields <- rean #forecast # put 'rean' to load the pressure fields and var from a reanalysis, put 'forecast' to load them from a forecast system
+fields.name <- rean.name #forecast.name
+
+year.start <- 1979 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which you want to compute the weather regimes.
+startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                     # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+var.data <- rean #ECMWF_monthly # ERAint # choose a dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- rean.name #ECMWF_monthly.name # ERAint.name
+
+lat.weighting=FALSE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+PCA <- FALSE # set to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential grid only has positive longitude values!
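+# Illustrative sketch (editor addition, not called anywhere in this script): converting
+# a longitude from the [-180, 180] convention into the [0, 360) convention used above;
+# 'to.lon360' is a hypothetical helper, not part of Rfunctions.R:
+# to.lon360 <- function(lon) ifelse(lon < 0, lon + 360, lon)
+# to.lon360(-85.5)   # 274.5, i.e. the lon.min set above
+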
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(unlist(rean) == unlist(ERAint), ERAint.name, JRA55.name))
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+n.years <- year.end - year.start + 1
+
+days.period <- n.days.period <- period.length <- list()
+for (p in 1:17){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning that was introduced only to be able to execute the command above
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[p]] <- length(days.period[[p]])
+  # number of days belonging to that period in a single year:
+  period.length[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
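+# Toy example (editor sketch, commented out) of the wrap-around built just above:
+# on a 10-degree grid the same index arithmetic selects one Atlantic box that
+# crosses the Greenwich meridian:
+# lon.toy <- seq(0, 350, by = 10)               # 36 longitudes
+# i.max <- tail(which(45 - lon.toy >= 0), 1)    # 5  (40 degrees E)
+# i.min <- which(lon.toy - 274.5 >= 0)[1]       # 29 (280 degrees E, i.e. 80 degrees W)
+# c(1:i.max, i.min:length(lon.toy))             # indices 1:5 and 29:36
+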
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but these values correspond to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime) # these three must be defined before allocating psleuFull
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c() # number of PCs retained for each period
+
+for(period in 13:16){
+  p <- period # the days.period/n.days.period lists below are indexed with 'p'
+  if(unlist(fields) == unlist(rean)){
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+  }
+
+  pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (e.g. winter)
+
+  # weight the pressure fields by latitude:
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+    pslPeriod.weighted <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  } else {
+    pslPeriod.weighted <- pslPeriod
+  }
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the [day, lat, lon] array into a [day, grid point] matrix
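+  # Quick sanity check (editor sketch, commented out) for the reshape above: R stores
+  # arrays column-major, so after dim(a) <- c(d1, d2*d3) each row of the matrix is one
+  # daily map, i.e. a[i,,] flattened:
+  # a <- array(rnorm(24), c(4, 3, 2)); m <- a; dim(m) <- c(4, 6)
+  # all.equal(m[2, ], as.vector(a[2,,]))   # TRUE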
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance
+    n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # keep only the leading PCs that together explain at least the chosen % of variance
+    my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # remove from pslmat the years with NA:
+    my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+var.num <- 2 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+
+} else { # Load 6-hourly var data in the forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+# if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WR time series of each season to its three months (1 to 12):
+if(identical(WR.period, 1:12)){
+  for(m in 1:12) my.cluster[[m]] <- list()
+
+  season.months <- list(c(1,2,12), 3:5, 6:8, 9:11) # months of the seasons 13 (DJF) to 16 (SON)
+  for(s in 13:16){
+    for(m in season.months[[s-12]]){
+      ss <- match(days.period[[s]], days.period[[m]])
+      ss[which(!is.na(ss))] <- 1
+      qq <- ss*my.cluster[[s]]$cluster
+      my.cluster[[m]]$cluster <- qq[!is.na(qq)]
+    }
+  }
+}
+
+for(period in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  p <- period # the days.period/n.days.period/period.length lists below are indexed with 'p'
+  if(unlist(fields) == unlist(rean)){
+
+  } else {
+    my.startdates.period <- months.period(forecast.year,mes,day,period)
+
+    #if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  varPeriod <- vareuFull[days.period[[p]],,] # select only the var data within the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+
+  if(sequences){
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- 
diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + rm(wrdiff, sequ) + } else{ + cluster.sequence <- my.cluster[[period]]$cluster + } + + gc() + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4)) + } else { + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- 
apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
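+# Tiny example (editor sketch) of the reordering driven by the 'orden' vector below:
+# with orden <- c("NAO+","NAO-","Blocking","Atl.Ridge"), a cluster whose saved name is
+# "Blocking" gets slot which(orden == "Blocking") = 3, so its composites are stored as
+# map3/imp3/sig3/fre3 and drawn in the third row of the sheet.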
+save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 4){ # April + cluster2.name="NAO+" + 
cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency 
series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, 
cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Legend 2:
+ legend2.xpos <- 0.48
+ legend2.width <- 0.25
+ legend2.cex <- 1.8
+ my.subset2 <- match(values.to.plot2, my.brks.var)
+
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+ if(!as.pdf) dev.off() # close the png device (one file per period)
+
+} # close for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v22.R b/old/weather_regimes_v22.R
new file mode 100644
index 0000000000000000000000000000000000000000..0b22b1c1b9d29596c3d93f5820e8af8dfa43285b
--- /dev/null
+++ b/old/weather_regimes_v22.R
@@ -0,0 +1,835 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for the var data:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+ECMWF_S4 <- list(path = 
paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'))
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load the pressure fields and var from a reanalysis, put 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1987 #1981 #1994 #1979 #1981
+year.end <- 1990 #2013 #2010
+
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+lat.weighting=FALSE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+PCA <- FALSE # set to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential grid only has positive longitude values!
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(unlist(rean) == unlist(ERAint), ERAint.name, JRA55.name))
+#forecast.name <- ECMWF_monthly.name
+forecast.name <- unname(ifelse(unlist(forecast) == unlist(ECMWF_S4), ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(unlist(fields) == unlist(rean), rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (p in 1:17){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning that was introduced only to be able to execute the command above
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[p]] <- length(days.period[[p]])
+  # number of days belonging to that period in a single year:
+  period.length[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(unlist(fields) == unlist(rean)) domain <- Load(var = psl, exp = NULL, obs = list(fields), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_S4)) domain <- Load(var = "psl", exp = list(fields), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member="number"), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_monthly)) domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but these values correspond to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(unlist(fields) == unlist(ECMWF_S4)){ # in this case, we can only load the data one month at a time (since each month needs ~3 GB of memory)
+  start.month <- 1
+  lead.month <- 1
+
+  n.members <- 15
+
+  start.month.char <- ifelse(start.month <= 9, paste0("0",start.month), as.character(start.month)) # zero-pad the startdate month
+  chosen.month <- start.month + lead.month - 1 # find which month of data we want to load
+  if(chosen.month > 12) chosen.month <- chosen.month - 12
+  leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start)
+  leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1
+  n.leadtimes <- leadtime.max - leadtime.min + 1
+  #print(paste(leadtime.min, leadtime.max))
+
+  psleuFull <- Load(var = "psl", exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+  # immediately convert psl to daily anomalies to remove the drift!!!
+  pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull$mod - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+}
+
+if(unlist(fields) == unlist(ECMWF_monthly)){
+  # Load 6-hourly psl data in the monthly forecast case:
+  n.leadtimes <- length(leadtime) # these three must be defined before allocating psleuFull
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c() # number of PCs retained for each period
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+for(p in WR.period){
+  if(unlist(fields) == unlist(rean)) pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (e.g. winter)
+
+  # in the S4 case we don't need to select anything, because we already loaded only 1 month of data
+
+  if(unlist(fields) == unlist(ECMWF_monthly)){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+  }
+
+  # weight the pressure fields by latitude:
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+    pslmat <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  if(unlist(fields) == unlist(rean)){
+    pslmat <- pslPeriod
+    dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  if(unlist(fields) == unlist(ECMWF_S4)){
+    #pslmat <- psleuFull
+    dim(psleuFull) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years (33) before the leadtimes!
+    #dim(pslmat) <- c(1, 3*33, 15*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years (33) before the leadtimes!
+    gc()
+  }
+
+  # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl. 
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the first PCs explaining at least 80% of the variance
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+    if(unlist(fields) == unlist(rean)) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30)
+    if(unlist(fields) == unlist(ECMWF_S4)) my.cluster[[p]] <- kmeans(psleuFull[1,,], centers=4, iter.max=100, nstart=30)
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable, to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+           latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+# in the S4 case, we can load the data for only 1 month at a time (since each month needs ~3 GB of memory),
+# but at present we ONLY use psl data!!!
+#if(unlist(fields) == unlist(ECMWF_S4)) {
+#  vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+#}
+
+if(unlist(fields) == unlist(ECMWF_monthly)){ # Load 6-hourly var data in the monthly forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
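+
+# A self-contained sketch of the PCA + k-means step used above, on a toy matrix of
+# days x grid points (illustrative sizes only; not executed by the script):
+if(FALSE){
+  X <- matrix(rnorm(1000*50), 1000, 50)          # toy: 1000 days, 50 grid points
+  pca <- princomp(X, cor = FALSE)
+  cumvar <- cumsum(pca$sdev^2) / sum(pca$sdev^2) # cumulative explained variance
+  npc <- which(cumvar > 0.8)[1]                  # first number of PCs reaching 80%
+  cl <- kmeans(pca$scores[, 1:npc], centers = 4, iter.max = 100, nstart = 30)
+  table(cl$cluster)                              # size of each of the 4 clusters
+}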
+## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WR time series to the months from 1 to 12:
+## if(WR.period == 1:12){
+## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(unlist(fields) == unlist(ECMWF_monthly)){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  cluster.sequence <- my.cluster[[p]]$cluster
+
+  if(sequences){ # it works only for rean data!!!
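+    # A more compact equivalent of the filter below (a sketch, not used by the script):
+    # flag the days lying inside runs of at least 5 identical cluster labels with rle().
+    # Unlike the loop below, it does not blank the first and last days of the period.
+    if(FALSE){
+      runs <- rle(my.cluster[[p]]$cluster)
+      keep <- rep(runs$lengths >= 5, runs$lengths)       # TRUE for days inside long runs
+      cluster.sequence.alt <- my.cluster[[p]]$cluster * keep
+    }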
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and the last 5 days are never checked by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[p]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers, without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[p]]$cluster * sequ
+    rm(wrdiff, sequ)
+  }
+
+  gc()
+
+  # indices of the days assigned to each regime (after the optional 5-day sequence filter):
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    } else { # for both S4 and the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.
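+      # The stride below relies on that interleaved time order; a toy sketch
+      # (hypothetical sizes, not executed by the script):
+      if(FALSE){
+        n.years.toy <- 3; n.days.toy <- 4
+        x <- paste0("y", rep(1:n.years.toy, n.days.toy), "d", rep(1:n.days.toy, each = n.years.toy))
+        x[seq(2, length(x), n.years.toy)]  # all the days of year 2: "y2d1" "y2d2" "y2d3" "y2d4"
+      }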
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+
+  }
+
+  rm(cluster.sequence)
+  gc()
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length[[p]]
+  wr2y <- wr2y/period.length[[p]]
+  wr3y <- wr3y/period.length[[p]]
+  wr4y <- wr4y/period.length[[p]]
+
+  if(unlist(fields) == unlist(rean)){
+    pslPeriod <- psleuFull[days.period[[p]],,]
+    pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+    pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+    rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+    gc()
+
+    pslwr1 <- pslPeriodAnom[wr1,,]
+    pslwr2 <- pslPeriodAnom[wr2,,]
+    pslwr3 <- pslPeriodAnom[wr3,,]
+    pslwr4 <- pslPeriodAnom[wr4,,]
+    rm(pslPeriodAnom)
+    gc()
+  }
+
+  if(unlist(fields) == unlist(ECMWF_S4)){ # in the S4 case the anomalies are already stored in psleuFull:
+    pslwr1 <- psleuFull[wr1,,]
+    pslwr2 <- psleuFull[wr2,,]
+    pslwr3 <- psleuFull[wr3,,]
+    pslwr4 <- psleuFull[wr4,,]
+    gc()
+  }
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # the same selection, but for var instead of psl:
+
+  varPeriod <- vareuFull[days.period[[p]],,] # select only the var data of the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim, varPeriodClim2)
+  gc()
+
+  #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth1, varPeriodAnom1)
+  gc()
+
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth2, varPeriodAnom2)
+  gc()
+
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth3, varPeriodAnom3)
+  gc()
+
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+  rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+  rm(varPeriodAnom)
+  gc()
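+
+  # Sketch of the significance test applied above at each grid point: the regime-day
+  # anomalies are compared with all the anomalies of the period through a (Welch)
+  # t-test on the two halves of the concatenated series (toy numbers only):
+  if(FALSE){
+    all.days <- rnorm(900)                  # toy anomaly series at one grid point
+    regime.days <- all.days[1:200] + 0.5    # toy regime subsample with a mean shift
+    t.test(all.days, regime.days)$p.value   # small p-value -> significant impact
+  }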
+
+  # save all the data necessary to redraw the graphs once we know the right regime:
+  save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+  rm(pvalue1,pvalue2,pvalue3,pvalue4)
+  rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+  rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+  gc()
+
+} # close the for loop on 'p'
+
+
+# run it twice: the first time with ordering <- FALSE, to look only at the cartography and find visually the right regime names of the four clusters;
+# the second time with ordering <- TRUE, to save the ordered cartography:
+ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- FALSE # FALSE: save results as one .png file for each season; TRUE: save only 1 .pdf file with all the seasons
+
+# position of the longitude values of Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+# choose the order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR, for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+  my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR, for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # mean anomaly of a WR
+if(var.name[var.num] == "sfcWind") {
+  my.brks.var <- seq(-3,3,0.5)
+  values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+}
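+
+# Note that image-like plots need one color per interval, i.e. length(my.brks)-1
+# colors for length(my.brks) breaks; a minimal sketch of the convention (toy data,
+# not executed by the script):
+if(FALSE){
+  brks.toy <- seq(-3, 3, 0.5)                                                   # 13 breaks
+  cols.toy <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(brks.toy)-1)  # 12 colors
+  image(matrix(seq(-3, 3, length.out = 100), 10, 10), breaks = brks.toy, col = cols.toy)
+}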
+if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; 
cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("imp",cluster1), varPeriodAnom1mean)
+  assign(paste0("imp",cluster2), varPeriodAnom2mean)
+  assign(paste0("imp",cluster3), varPeriodAnom3mean)
+  assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+  assign(paste0("sig",cluster1), pvalue1)
+  assign(paste0("sig",cluster2), pvalue2)
+  assign(paste0("sig",cluster3), pvalue3)
+  assign(paste0("sig",cluster4), pvalue4)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  # Visualize the composition with the 3 graphs for each of the 4 regimes: their pressure fields, their impact on var and their frequency series:
+  if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700)
+
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+  mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2)
+
+  # Centroid maps:
+  map.xpos <- 0
+  map.width <- 0.46
+  par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+  PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+  PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+  PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+  PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+  # x-axis labels ("year") of the frequency plots:
+  map.title.xpos <- 0.76
+  map.title.width <- 0.2
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+  mtext("year", cex=3)
+
+  # Impact maps:
+  impact.xpos <- 0.47
+  impact.width <- 0.26
+  par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+  PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10,
drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", 
var.name.full[var.num]), font=2, cex=4)
+
+  # Title 3:
+  title3.xpos <- 0.75
+  title3.width <- 0.25
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+  # Legend 1:
+  legend1.xpos <- 0.01
+  legend1.width <- 0.44
+  legend1.cex <- 1.8
+  my.subset <- match(values.to.plot, my.brks)
+
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off() # when saving one .png file per season
+
+} # close for loop on 'p'
+
+if(as.pdf) dev.off() # when saving a single .pdf with all the seasons
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v22.R~ b/old/weather_regimes_v22.R~
new file mode 100644
index 0000000000000000000000000000000000000000..06d6af34ad281f05b9b54f2ad059fc4db143dd34
--- /dev/null
+++ b/old/weather_regimes_v22.R~
@@ -0,0 +1,815 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above, from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'))
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load the pressure fields and var from a reanalysis, put 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1994 #1979 #1981
+year.end <- 2013 #2010
+
+leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+startdate.shift <- 1 # if leadtime=5:11, it's better to shift all the startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                     # if leadtime=12:18, it's better to shift all the startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+lat.weighting=FALSE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only the days belonging to sequences of at least 5 consecutive days of the same regime.
+
+PCA <- FALSE # set to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
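+
+# The 0-360 longitude convention above: a western longitude like 85.5W becomes
+# 360 - 85.5 = 274.5. A one-line sketch of the conversion (not executed here):
+if(FALSE){
+  lon360 <- function(x) x %% 360  # maps e.g. -85.5 to 274.5
+  lon360(-85.5)
+}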
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(unlist(rean) == unlist(ERAint), ERAint.name, JRA55.name))
+#forecast.name <- ECMWF_monthly.name
+forecast.name <- unname(ifelse(unlist(forecast) == unlist(ECMWF_S4), ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(unlist(fields) == unlist(rean), rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (p in 1:17){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning, which was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[p]] <- length(days.period[[p]])
+  # number of days belonging to that period in a single year:
+  period.length[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(unlist(fields) == unlist(rean)) domain <- Load(var = psl, exp = NULL, obs = list(fields), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_S4)) domain <- Load(var = "psl", exp = list(fields), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member="number"), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_monthly)) domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
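+
+# Toy sketch of how pos.lon wraps the study box across the Greenwich meridian
+# (hypothetical 1-degree grid; not executed by the script):
+if(FALSE){
+  lon.toy <- 0:359
+  pos.lon.max.toy <- tail(which(45 - lon.toy >= 0), 1)  # 46  -> lon 45 (eastern edge)
+  pos.lon.min.toy <- which(lon.toy - 274.5 >= 0)[1]     # 276 -> lon 275 (western edge)
+  pos.lon.toy <- c(1:pos.lon.max.toy, pos.lon.min.toy:length(lon.toy))
+}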
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded with the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but their values correspond to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+           latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(unlist(fields) == unlist(ECMWF_S4)){ # in this case, we can load the data for only 1 month at a time (since each month needs ~3 GB of memory)
+  start.month <- 12
+  lead.month <- 1
+
+  n.members <- 15
+
+  if(start.month <= 9) start.month.char <- paste0("0",as.character(start.month)) else start.month.char <- as.character(start.month)
+
+  #n.days.in.a.month(start.month + lead.month -1 , year.start)
+  chosen.month <- start.month + lead.month - 1 # find the month for which we want to load data
+  if(chosen.month > 12) chosen.month <- chosen.month - 12
+  leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start)
+  leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1
+  n.leadtimes <- leadtime.max - leadtime.min + 1
+  #print(paste(leadtime.min, leadtime.max))
+
+  psleuFull <- Load(var = "psl", exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+}
+
+if(unlist(fields) == unlist(ECMWF_monthly)){
+  # Load 6-hourly psl data in the forecast case:
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +for(period in WR.period){ + if(unlist(fields) == unlist(rean)){ pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # in this case of S4 we don't need to select anything because we already loaded only 1 month of data + + if(unlist(fields) == unlist(ECMWF_monthly)){ + my.startdates.period <- months.period(forecast.year,mes,day,period) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + } + + # weight the pressure fields based on latitude: + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + pslmat <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + if(unlist(fields) == unlist(rean)){ + pslmat <- pslPeriod + dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + if(unlist(fields) == unlist(ECMWF_S4)){ + pslmat <- psleuFull$mod + dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert array in a matrix! Beware that it always put the years (33) before the leadtimes! + gc() + } + # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(unlist(fields) == unlist(rean)) my.cluster[[period]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) + if(unlist(fields) == unlist(ECMWF_S4)) my.cluster[[period]] <- kmeans(pslmat[1,,], centers=4, iter.max=100, nstart=30) + } + + rm(pslmat) + gc() +} + +var.num <- 2 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +# in the S4 case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!! +#if(unlist(fields) == unlist(ECMWF_S4)) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(unlist(fields) == unlist(ECMWF_monthly)){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(WR.period <- 
1:12){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +## } + +for(period in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(rean)){ + + + } else { + my.startdates.period <- months.period(forecast.year,mes,day,period) + + #if(period == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thusdays in the period + } + + + if(sequences){ + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[period]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE 
+ if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[period]]$cluster * sequ + rm(wrdiff, sequ) + } else{ + cluster.sequence <- my.cluster[[period]]$cluster + } + + gc() + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } else { + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + + rm(cluster.sequence) + gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # similar selection, but for var instead of psl: + + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the 
climatology of the period + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,period,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
+save.names <- FALSE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(period in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],"_mapdata.RData")) + + if(save.names){ + if(period == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 4){ # April + cluster2.name="NAO+" + 
cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(period == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(period == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(period == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(period == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[period] <- cluster1.name + cluster2.name.period[period] <- cluster2.name + cluster3.name.period[period] <- cluster3.name + cluster4.name.period[period] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[period] + cluster2.name <- cluster2.name.period[period] + cluster3.name <- cluster3.name.period[period] + cluster4.name <- cluster4.name.period[period] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency 
series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[period],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[period]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, barplot.xpos + 
barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, 
cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off() # for saving 4 png files
+
+} # close the for loop on 'period'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v23.R b/old/weather_regimes_v23.R
new file mode 100644
index 0000000000000000000000000000000000000000..50dd5a91c8f56bad802e1e7a5f301a3a9d02e1d5
--- /dev/null
+++ b/old/weather_regimes_v23.R
@@ -0,0 +1,854 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'))
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from a reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime
+
+PCA <- FALSE # set to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential grid only has positive longitude values!
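+# Inert example of the longitude convention above: these grids use 0-360 longitudes, so a western
+# bound like 85.5W must be written as its 0-360 equivalent; 'to360' is just a local helper for
+# this sketch, not a function of the pipeline:
+if(FALSE){
+  to360 <- function(lon) ifelse(lon < 0, lon + 360, lon)
+  to360(-85.5)  # 274.5, the lon.min used above
+}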
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(unlist(rean) == unlist(ERAint), ERAint.name, JRA55.name))
+#forecast.name <- ECMWF_monthly.name
+forecast.name <- unname(ifelse(unlist(forecast) == unlist(ECMWF_S4), ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(unlist(fields) == unlist(rean), rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than or equal to lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (p in 1:17){
+  days.period[[p]] <- NA
+  for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p))
+  days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning, introduced only so the concatenation above works
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[p]] <- length(days.period[[p]])
+  # number of days belonging to that period in a single year:
+  period.length[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(unlist(fields) == unlist(rean)) domain <- Load(var = psl, exp = NULL, obs = list(fields), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_S4)) domain <- Load(var = "psl", exp = list(fields), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1)
+if(unlist(fields) == unlist(ECMWF_monthly)) domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: this only covers the case where the study area crosses the Greenwich meridian!
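+# Inert example of the Greenwich wrap-around selection above: on a 0-360 grid, a box running from
+# 274.5E to 45E is the head of the longitude vector (0 to lon.max) plus its tail (lon.min to 360);
+# the grid below is a toy 3-degree one, the real indices come from domain$lon:
+if(FALSE){
+  lon.demo <- seq(0, 357, by = 3)                 # 120 toy longitudes
+  i.max <- tail(which(45 - lon.demo >= 0), 1)     # last point at or below 45E
+  i.min <- which(lon.demo - 274.5 >= 0)[1]        # first point at or above 274.5E
+  lon.box <- lon.demo[c(1:i.max, i.min:length(lon.demo))]
+  range(lon.box)                                  # spans both sides of the Greenwich meridian
+}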
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max etc., but corresponding to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(unlist(fields) == unlist(rean)){ # load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(unlist(fields) == unlist(ECMWF_S4)){ # in this case, we can load only the data of 1 month at a time (since each month needs ~3 GB of memory)
+  start.month <- 1
+  lead.month <- 0
+
+  n.members <- 15
+
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- as.character(start.month)}
+  chosen.month <- start.month + lead.month # find which month to load
+  if(chosen.month > 12) chosen.month <- chosen.month - 12
+  leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start)
+  leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1
+  n.leadtimes <- leadtime.max - leadtime.min + 1
+  #print(paste(leadtime.min, leadtime.max))
+
+  psleuFull <- Load(var = "psl", exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+  # immediately convert psl to daily anomalies to remove the drift!!!
+  pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull$mod - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+}
+
+if(unlist(fields) == unlist(ECMWF_monthly)){
+  # Load 6-hourly psl data in the monthly forecast case (note: 'leadtime' must be set above):
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
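+  # Inert example of the row ordering documented above: rows run through all years of a given
+  # startdate before moving to the next startdate, so the row of (startdate, year) is:
+  if(FALSE){
+    n.years.demo <- 20                                   # toy value, e.g. 1994-2013
+    row.of <- function(startdate, y.index) (startdate-1)*n.years.demo + y.index
+    row.of(1, 1)   # 1:  first startdate, first year  (19940102)
+    row.of(1, 2)   # 2:  first startdate, second year (19950102)
+    row.of(2, 1)   # 21: second startdate, first year (19940109)
+  }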
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pa to MSLP in hPa
+gc()
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+for(p in WR.period){
+  if(unlist(fields) == unlist(rean)) pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (e.g. winter)
+
+  # in the S4 case there is nothing to select, because only 1 month of data was loaded
+
+  if(unlist(fields) == unlist(ECMWF_monthly)){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+  }
+
+  # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!!!):
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+    pslmat <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  if(unlist(fields) == unlist(rean)){
+    pslmat <- pslPeriod
+    dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  if(unlist(fields) == unlist(ECMWF_S4)){
+    pslmat <- psleuFull
+    dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years (33) before the leadtimes!
+    #dim(pslmat) <- c(1, 3*33, 15*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years (33) before the leadtimes!
+    gc()
+  }
+
+  # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+  #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA
+
+  if(PCA){ # compute the PCs:
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # how many PCs to keep, based on their cumulative explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the PCs that together explain at least the chosen fraction of variance
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+    if(unlist(fields) == unlist(rean)) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30)
+    if(unlist(fields) == unlist(ECMWF_S4)) my.cluster[[p]] <- kmeans(pslmat[1,,], centers=4, iter.max=100, nstart=30)
+  }
+
+  rm(pslmat)
+  gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+var.num <- 1 # choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable, used in the titles of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(unlist(fields) == unlist(rean)){ # load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+# in the S4 case, we could load only the data of 1 month at a time (since each month needs ~3 GB of memory),
+# but at present we ONLY use psl data!!!
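+# Inert sketch of the PCA + k-means step above: keep the leading PCs explaining at least the
+# chosen fraction of variance, then cluster the PC scores into 4 regimes (toy matrix below):
+if(FALSE){
+  set.seed(1)
+  X <- matrix(rnorm(300*50), 300, 50)            # toy [days x grid points] anomaly matrix
+  pca <- princomp(X, cor = FALSE)
+  expl <- cumsum(pca$sdev^2 / sum(pca$sdev^2))   # cumulative explained variance
+  npc <- which(expl > 0.8)[1]                    # smallest number of PCs above 80%
+  cl <- kmeans(pca$scores[, 1:npc], centers = 4, iter.max = 100, nstart = 30)
+  table(cl$cluster)                              # days assigned to each of the 4 regimes
+}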
+#if(unlist(fields) == unlist(ECMWF_S4)) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(unlist(fields) == unlist(ECMWF_monthly)){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(WR.period <- 1:12){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- 
match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(unlist(fields) == unlist(ECMWF_monthly)){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  cluster.sequence <- my.cluster[[p]]$cluster
+
+  if(sequences){ # it works only for rean data!!!
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and the last 5 days are not examined by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[p]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close the for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[p]]$cluster * sequ
+    rm(wrdiff, sequ)
+  }
+
+  gc()
+
+  # replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+
+    if(unlist(fields) == unlist(rean)){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    } else { # for both S4 and the monthly system, the time
order is always: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + + } + + #rm(cluster.sequence) + #gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + if(unlist(fields) == unlist(rean)){ + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + } + + if(unlist(fields) == unlist(ECMWF_S4)){ + # in this case, psleuFull is already in anomalies: + pslmat <- psleuFull + dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat,n.pos.lon) + pslwr1 <- pslmat[1,,wr1,,] + pslwr2 <- pslmat[1,,wr2,,] + pslwr3 <- pslmat[1,,wr3,,] + pslwr4 <- pslmat[1,,wr4,,] + rm(pslmat) + gc() + + pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + } + + # similar selection, but for var instead of psl: + if(unlist(fields) == unlist(rean)){ + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) 
t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(unlist(fields) == unlist(ECMWF_S4)){ + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + } +} # close the for loop on 'p' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- FALSE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
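+# Inert sketch of the renaming pass below: on the second run each cluster's outputs are assigned
+# to the slot of its regime in 'orden', so panels always appear in the same order; the names here
+# are an invented example, the real ones come from inspecting the maps of the first run:
+if(FALSE){
+  orden.demo <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+  cluster1.name.demo <- "Blocking"                  # regime recognised visually for cluster 1
+  slot <- which(orden.demo == cluster1.name.demo)   # 3: cluster 1 plots in the Blocking row
+  assign(paste0("map", slot), matrix(0, 2, 2))      # creates 'map3' from cluster 1's centroid
+}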
+save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- TRUE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p 
== 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + if(unlist(fields) == unlist(rean)){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) 
png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(unlist(fields) == unlist(rean)){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, 
barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + 
ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  legend2.xpos <- 0.48
+  legend2.width <- 0.25
+  legend2.cex <- 1.8
+  my.subset2 <- match(values.to.plot2, my.brks.var)
+
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+  mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+
+  if(!as.pdf) dev.off() # for saving 4 png files
+
+} # close the for loop on 'p'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v23.R~ b/old/weather_regimes_v23.R~
new file mode 100644
index 0000000000000000000000000000000000000000..bf2bfa228bc6eb138e1ed4d8dc89ab90ba565a3b
--- /dev/null
+++ b/old/weather_regimes_v23.R~
@@ -0,0 +1,854 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'))
paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc')) +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from a reanalysis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1984 #1981 #1994 #1979 #1981 +year.end <- 1990 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on. +forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime. + +PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
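+# Side note (illustration only, not part of the original workflow): lon.min=274.5 is
+# just -85.5 expressed in the 0-360 longitude convention of the input files; the
+# helper name below is arbitrary and exists only for this example:
+to.lon360 <- function(lon) (lon + 360) %% 360
+to.lon360(-85.5) # 274.5
+to.lon360(45)    # 45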
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(unlist(rean) == unlist(ERAint), ERAint.name, JRA55.name)) +#forecast.name <- ECMWF_monthly.name +forecast.name <- unname(ifelse(unlist(forecast) == unlist(ECMWF_S4), ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(unlist(fields) == unlist(rean), rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (p in 1:17){ + days.period[[p]] <- NA + for(y in year.start:year.end) days.period[[p]] <- c(days.period[[p]], n.days.in.a.future.year(year.start, y) + pos.period(y,p)) + days.period[[p]] <- days.period[[p]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[p]] <- length(days.period[[p]]) + # Number of days belonging to that period in a single year: + period.length[[p]] <- n.days.in.a.period(p,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years +} + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(unlist(fields) == unlist(rean)) domain <- Load(var = psl, exp = NULL, obs = list(fields), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +if(unlist(fields) == unlist(ECMWF_S4)) domain <- Load(var = "psl", exp = list(fields), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1) +if(unlist(fields) == unlist(ECMWF_monthly)) domain <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
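+# Quick self-contained check of the wrap-around selection above, on an invented
+# 2.5-degree grid (all 'toy.*' objects exist only for this example):
+toy.lon <- seq(0, 357.5, by=2.5)
+toy.pos.lon.max <- tail(which(45 - toy.lon >= 0), 1) # last grid point east of Greenwich inside the box
+toy.pos.lon.min <- which(toy.lon - 274.5 >= 0)[1]    # first grid point west of Greenwich inside the box
+toy.pos.lon <- c(1:toy.pos.lon.max, toy.pos.lon.min:length(toy.lon))
+toy.lon[toy.pos.lon] # the two arcs 0..45E and 275..357.5E glued into one vector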
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(unlist(fields) == unlist(rean)){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(unlist(fields) == unlist(ECMWF_S4)){ # in this case, we can load the data for only one month at a time (since each month needs ~3 GB of memory) + start.month <- 11 + lead.month <- 1 + + n.members <- 15 + + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- as.character(start.month)} + chosen.month <- start.month + lead.month # find the month for which we want to load data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1 + n.leadtimes <- leadtime.max - leadtime.min + 1 + print(paste(leadtime.min, leadtime.max)) + + psleuFull <- Load(var = "psl", exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + # immediately convert psl to daily anomalies to remove the drift!!! + pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull$mod - pslPeriodClim2 + rm(pslPeriodClim2) + gc() +} + +if(unlist(fields) == unlist(ECMWF_monthly)){ + # Load 6-hourly psl data in the monthly forecast case; the daily order inside psleuFull is: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
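+  # Toy check of the index arithmetic used below for this interleaved ordering
+  # (numbers invented for this example): with 3 years per startdate, startdate s
+  # occupies the rows (1+(s-1)*3):(s*3) of the first dimension:
+  toy.rows <- function(s, toy.n.years=3) (1 + (s-1)*toy.n.years):(s*toy.n.years)
+  toy.rows(1) # 1 2 3 -> years 1..3 of the 1st startdate
+  toy.rows(2) # 4 5 6 -> years 1..3 of the 2nd startdate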
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +for(p in WR.period){ + if(unlist(fields) == unlist(rean)) pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (e.g. winter) + + # in the case of S4 we don't need to select anything because we already loaded only one month of data + + if(unlist(fields) == unlist(ECMWF_monthly)){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!!!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + pslPeriod <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + if(unlist(fields) == unlist(rean)){ + pslmat <- pslPeriod + dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod) + } + + if(unlist(fields) == unlist(ECMWF_S4)){ + pslmat <- psleuFull + dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years (33) before the leadtimes! + #dim(pslmat) <- c(1, 3*33, 15*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years (33) before the leadtimes! + gc() + } + + # # if you are using the monthly forecast system, in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + #if(unlist(fields) == unlist(forecast) && period == 13) pslmat[21:40,] <- NA + + if(PCA){ # compute the PCs: + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl. 
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the PCs that together explain at least the chosen % of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(unlist(fields) == unlist(rean)) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) + if(unlist(fields) == unlist(ECMWF_S4)) my.cluster[[p]] <- kmeans(pslmat[1,,], centers=4, iter.max=100, nstart=30) + } + + rm(pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(unlist(fields) == unlist(rean)){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +}
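+# A compact, self-contained sketch of the PCA + k-means step used above, run on
+# synthetic data (every 'toy.*' name is invented for this example only):
+set.seed(1)
+toy.data <- matrix(rnorm(300*20), nrow=300)              # 300 "days" x 20 "grid points"
+toy.pca <- princomp(toy.data, cor=FALSE)
+toy.expl <- cumsum(toy.pca$sdev^2 / sum(toy.pca$sdev^2)) # cumulative explained variance
+toy.n.pcs <- which(toy.expl > variance.explained)[1]     # smallest number of PCs above the threshold
+toy.cluster <- kmeans(toy.pca$scores[, 1:toy.n.pcs], centers=4, iter.max=100, nstart=30)
+table(toy.cluster$cluster)                               # sizes of the 4 toy clusters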
+# in the S4 case, we can load the data for only one month at a time (since each month needs ~3 GB of memory), +# but at present we ONLY use psl data!!! +#if(unlist(fields) == unlist(ECMWF_S4)) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(unlist(fields) == unlist(ECMWF_monthly)){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(all(WR.period == 1:12)){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- 
match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +## } + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(unlist(fields) == unlist(ECMWF_monthly)){ + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period + } + + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + gc() + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + + if(unlist(fields) == unlist(rean)){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } else { # for both S4 and the monthly system, the time 
order is always: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + + } + + #rm(cluster.sequence) + #gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + if(unlist(fields) == unlist(rean)){ + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + } + + if(unlist(fields) == unlist(ECMWF_S4)){ + # in this case, psleuFull is already in anomalies: + pslmat <- psleuFull + dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat,n.pos.lon) + pslwr1 <- pslmat[1,,wr1,,] + pslwr2 <- pslmat[1,,wr2,,] + pslwr3 <- pslmat[1,,wr3,,] + pslwr4 <- pslmat[1,,wr4,,] + rm(pslmat) + gc() + + pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + } + + # similar selection, but for var instead of psl: + if(unlist(fields) == unlist(rean)){ + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) 
t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(unlist(fields) == unlist(ECMWF_S4)){ + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + } +} # close the for loop on 'p' + + +# run it twice: first with ordering <- FALSE, to look at the cartography and visually identify the right regime names of the four clusters; +# then a second time with ordering <- TRUE, to save the ordered cartography: +ordering <- FALSE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
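+# A minimal sketch (cluster names invented for this example) of the reordering
+# logic applied below: 'which(orden == name)' gives each cluster its slot in the
+# fixed top-to-bottom plotting order, so cluster k is drawn in that position:
+toy.orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+toy.names <- c("Blocking","NAO+","Atl.Ridge","NAO-")   # names assigned by eye to clusters 1:4
+sapply(toy.names, function(nm) which(toy.orden == nm)) # slots 3 1 4 2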
+save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- TRUE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# positions of the lon values of Europe only (without the Atlantic Ocean and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],".pdf"),width=40, height=60) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster4.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p 
== 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + if(unlist(fields) == unlist(rean)){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: their pressure fields, their impact on var and their frequency series: + if(!as.pdf) 
png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(unlist(fields) == unlist(rean)){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + par(fig=c(barplot.xpos, 
barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=60, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + 
ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + diff --git a/old/weather_regimes_v24.R b/old/weather_regimes_v24.R new file mode 100644 index 0000000000000000000000000000000000000000..952429b8a4497eec917fa9175d9fee2a10ac04d5 --- /dev/null +++ b/old/weather_regimes_v24.R @@ -0,0 +1,883 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(RColorBrewer) # for brewer.pal(), used below for the color scales +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- 
'/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from a reanalysis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +PCA <- FALSE # set to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude! + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on. 
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years +} + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +#my.grid<-paste0('r',n.lon,'x',n.lat) +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1) #grid=my.grid, method='bilinear', nprocs=1) +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only one month at a time (since each month needs ~3 GB of memory) + start.month <- 1 # start date (1=January, 12=December) + lead.month <- 1 # lead time in months (0= same as start date, 1= the month following the start date month, etc.) + + n.members <- 15 # number of members of the forecast system to use + + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- as.character(start.month)} + chosen.month <- start.month + lead.month # find the month for which we want to load data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1 + n.leadtimes <- leadtime.max - leadtime.min + 1 + print(paste("First leadtime:", leadtime.min, "Last leadtime:", leadtime.max, "N. of leadtimes:", n.leadtimes)) + + psleuFull <- array(NA,c(1, n.members, n.years, n.leadtimes, n.pos.lat, n.pos.lon)) + for (y in year.start:year.end){ + temp <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(y, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[1,,y-year.start+1,,,] <- temp$mod[1,,1,,,] # fill the slot of year y (member x leadtime x lat x lon) + rm(temp) + gc() + } + + # Old quick load command (removed because it doesn't take leap years into account): + #psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + # immediately convert psl to daily anomalies to remove the drift!!!
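+  # (Toy illustration, with invented dimensions, of the anomaly computation below:
+  # the climatology is taken per leadtime and grid point, pooling members and start
+  # years, so subtracting it removes the lead-time-dependent drift of the model.)
+  toy.psl <- array(rnorm(2*3*5), c(2,3,5))   # member x start year x leadtime
+  toy.clim <- apply(toy.psl, 3, mean)        # one climatological value per leadtime
+  toy.anom <- sweep(toy.psl, 3, toy.clim)    # anomalies at each leadtime
+  round(apply(toy.anom, 3, mean), 12)        # ~0 at every leadtime: drift removed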
+ pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the monthly forecast case: + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name) pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (e.g. winter) + + # in the case of S4 we don't need to select anything because we already loaded only one month of data! + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + # if(fields.name == forecast.name && period == 13) pslmat[21:40,] <- NA + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + pslPeriod <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # order psleuFull to use it as input of the cluster analysis: + if(fields.name == rean.name){ + pslmat <- pslPeriod + dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! 
+ rm(pslPeriod) + } + + if(fields.name == ECMWF_S4.name){ + #pslmat <- melt(psleuFull[1,,,,,]) + #temp <- acast(pslmat, Var2+Var3~Var4+Var5+Var1) + pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var2 + Var3 ~ Var1 + Var4 + Var5)) # Var1: ensemble members, Var2: start years, Var3: lead months, Var4: lat, Var5: lon + # note that the data order in pslmat[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + + # not working properly, probably there is a bug: + # pslmat <- psleuFull + # dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years before the leadtimes! + # i.e: dim(pslmat) <- c(1, 3*33, 15*n.pos.lat*n.pos.lon) + } + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the PCs that together explain at least the chosen % of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) + + # check it with: matrix(my.cluster[[p]]$cluster, n.leadtimes, n.years) + if(fields.name == ECMWF_S4.name) my.cluster[[p]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30, trace=TRUE) + + } + + rm(pslmat) + gc() +} + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +# in the S4 case, we can load the data for only one month at a time (since each month needs ~3 GB of memory), +# but at present we ONLY use psl data!!!
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable, for the titles of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+ vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+# in the S4 case, we can load only the data for 1 month at a time (since each month needs ~3 GB of memory)
+# but at present we ONLY use psl data!!!
+#if(fields.name == ECMWF_S4.name) {
+# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+#}
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+ sdates <- weekly.seq(forecast.year,mes,day)
+ vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+ for (startdate in 1:length(sdates)){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8)), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+ vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+ rm(temp,var)
+ gc()
+ }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+## # if you want to study WRs at the monthly level but using their seasonal time series, you have to copy the seasonal WR time series to the months from 1 to 12:
+## if(identical(WR.period, 1:12)){
+## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+ #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period <- period.length <- list()
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+ }
+
+ if(sequences){ # it works only for rean data!!!
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days.
+ # warning: the first 4 and the last 5 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+ wrdiff <- diff(my.cluster[[p]]$cluster)
+
+ sequ <- rep(FALSE, n.days.period[[p]])
+ for(i in 5:(n.days.period[[p]]-5)){
+ sequ[i] <- TRUE
+ if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ } # close for loop on i
+
+ # daily cluster sequence without the days that don't belong to sequences of 5 or more days (such days are set to 0):
+ cluster.sequence <- my.cluster[[p]]$cluster * sequ
+ rm(wrdiff, sequ)
+ }
+
+ gc()
+
+ if(!sequences) cluster.sequence <- my.cluster[[p]]$cluster # otherwise we would overwrite the filtered sequence computed above
+
+ # days belonging to each regime (if sequences=TRUE, only the days inside sequences of at least 5 days of that regime):
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ # yearly frequency of each regime:
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ if(fields.name == rean.name || fields.name == ECMWF_S4.name){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+ } else { # for the monthly system, the time order is instead: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+
+ }
+
+ #rm(cluster.sequence)
+ #gc()
+
+ # convert the yearly counts to relative frequencies (fractions; they are multiplied by 100 at plotting time):
+ wr1y <- wr1y/period.length[[p]]
+ wr2y <- wr2y/period.length[[p]]
+ wr3y <- wr3y/period.length[[p]]
+ wr4y <- wr4y/period.length[[p]]
+
+ if(fields.name == rean.name){
+ pslPeriod <- psleuFull[days.period[[p]],,]
+ pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+ pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+ rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+ gc()
+
+ pslwr1 <- pslPeriodAnom[wr1,,]
+ pslwr2 <- pslPeriodAnom[wr2,,]
+ pslwr3 <- pslPeriodAnom[wr3,,]
+ pslwr4 <- pslPeriodAnom[wr4,,]
+ rm(pslPeriodAnom)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ # in this case, psleuFull is already in anomalies:
+ pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var1 ~ Var2 + Var3 ~ Var4 ~ Var5)) # Var1: ensemble members, Var2: start years, Var3: lead months, Var4: lat, Var5: lon
+ #pslmat <- psleuFull
+ #dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat, n.pos.lon) # not working
+ pslwr1 <- pslmat[,wr1,,]
+ pslwr2 <- pslmat[,wr2,,]
+ pslwr3 <- pslmat[,wr3,,]
+ pslwr4 <- pslmat[,wr4,,]
+ rm(pslmat)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ gc()
+ }
+
+
+ if(fields.name == rean.name){
+ # same selection as above, but for var instead of psl:
+ varPeriod <- vareuFull[days.period[[p]],,] # select only the var data of the chosen period
+
+ varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+ varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+ varPeriodAnom <- varPeriod - varPeriodClim2
+ rm(varPeriod, varPeriodClim, varPeriodClim2)
+ gc()
+
+ #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+ varPeriodAnom1 <- varPeriodAnom[wr1,,]
+ varPeriodAnom2 <- varPeriodAnom[wr2,,]
+ varPeriodAnom3 <- varPeriodAnom[wr3,,]
+ varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+ varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+ varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+ varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+ varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+ varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+ pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth1, varPeriodAnom1)
+ gc()
+
+ varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+ pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth2, varPeriodAnom2)
+ gc()
+
+ varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+ pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth3, varPeriodAnom3)
+ gc()
+
+ varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+ pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+ rm(varPeriodAnom)
+ gc()
+
+ # save all the data necessary to redraw the graphs once we know which regime corresponds to each cluster:
+ save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+ rm(pvalue1,pvalue2,pvalue3,pvalue4)
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+ gc()
+ }
+
+ if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps):
+ save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ gc()
+ }
+} # close the for loop on 'p'
+
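+# Minimal standalone sketch of the grid-point significance test used in the
+# loop above: the daily anomalies of one regime are stacked after all daily
+# anomalies with abind(), then a two-sample t-test is applied at every grid
+# point. Data and sizes are synthetic, for illustration only (kept commented
+# so it never runs with the script):
+# n <- 100; m <- 30 # number of days in the period / in one regime
+# allDays <- array(rnorm(n*4*5), c(n,4,5)) # daily anomalies on a 4x5 grid
+# regDays <- allDays[1:m,,] + 0.5 # shifted subset, so p-values tend to be low
+# both <- abind(allDays, regDays, along = 1)
+# pval <- apply(both, c(2,3), function(x) t.test(x[1:n], x[(n+1):length(x)])$p.value)
+# mean(pval < 0.05) # fraction of grid points significant at the 5% level
+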
+# run this final part twice: the first time with ordering <- FALSE, to inspect the maps and identify visually which regime each of the four clusters represents;
+# the second time with ordering <- TRUE, to save the cartography ordered by regime:
+ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+save.names <- TRUE # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- FALSE # FALSE: save results as one .png file for each season; TRUE: save only 1 .pdf file with all seasons
+
+# positions of the longitude values of continental Europe only (excluding the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # breaks for the mean anomaly of a WR (m)
+ my.brks <- c(-300,seq(-200,200,10),300) # breaks for the mean anomaly of a WR (m)
+ my.brks2 <- c(-300,seq(-200,200,10),300) # breaks for the mean anomaly of a WR (m), for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # breaks for the mean anomaly of a WR (hPa)
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # breaks for the mean anomaly of a WR (hPa), for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid for both wind speed and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20)
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20)
+#my.brks.var <- c(-20,seq(-3,3,0.5),20)
+if(var.name[var.num] == "sfcWind") {
+ my.brks.var <- seq(-3,3,0.5)
+ values.to.plot2 <- c(-3,-2,-1,0,1,2,3)
+}
+if(var.name[var.num] == "tas") {
+ my.brks.var <- seq(-5,5,1)
+ values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5)
+}
+
+#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white"
+
+if(as.pdf) pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)
+
+for(p in WR.period){
+ load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+ if(save.names){
+ if(p == 1){ # January
+ cluster3.name="NAO+"
+ cluster2.name="NAO-"
+ cluster4.name="Blocking"
+ cluster1.name="Atl.Ridge"
+ }
+ if(p == 2){ # February
+ cluster3.name="NAO+"
+ cluster2.name="NAO-"
+ cluster4.name="Blocking"
+ cluster1.name="Atl.Ridge"
+ }
+ if(p == 3){ # March
+ cluster3.name="NAO+"
+ cluster2.name="NAO-"
+ cluster4.name="Blocking"
+
cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its 
frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + 
freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + 
legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + diff --git a/old/weather_regimes_v24.R~ b/old/weather_regimes_v24.R~ new file mode 100644 index 0000000000000000000000000000000000000000..92ccaba8e356d8c7117132b9fe455838d0a86d09 --- /dev/null +++ b/old/weather_regimes_v24.R~ @@ -0,0 +1,877 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = 
paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +PCA <- FALSE # se to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +lat.weighting=FALSE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitud! + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. 
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +} + +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1) +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lat <- length(domain$lat) +n.lon <- length(domain$lon) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! 
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of chosen area only + +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name) # in this case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) + start.month <- 12 # start date (1=January, 12=December) + lead.month <- 1 # lead time in months (0= same as start date, 1= the month following the start date month, etc.) + + n.members <- 15 # number of members of the forecast system to use + + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month} + chosen.month <- start.month + lead.month # find which month we want to load data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1 + n.leadtimes <- leadtime.max - leadtime.min + 1 + #print(paste(leadtime.min, leadtime.max)) + + psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + # immediatly convert psl in daily anomalies to remove the drift!!! + pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull$mod - pslPeriodClim2 + rm(pslPeriodClim2) + gc() +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # in this case of S4 we don't need to select anything because we already loaded only 1 month of data! + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + # if(fields.name == forecast.name && period == 13) pslmat[21:40,] <- NA + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + pslmat <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # order psleuFull to use it as input of the cluster analysis: + if(fields.name == rean.name){ + pslmat <- pslPeriod + dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + if(fields.name == ECMWF_S4.name){ + #pslmat <- melt(psleuFull[1,,,,,]) + #temp <- acast(pslmat, Var2+Var3~Var4+Var5+Var1) + pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var2 + Var3 ~ Var1 + Var4 + Var5)) # Var1: ensemb.members, var2: start years, var3: lead months, var4: lat, var5: lon + # note that the data order in pslmat[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... year2day31 + gc() + + # not working properly, probably there is a bug: + # pslmat <- psleuFull + # dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert array in a matrix! Beware that it always put the years before the leadtimes! 
+ # i.e: dim(pslmat) <- c(1, 3*33, 15*n.pos.lat*n.pos.lon) + } + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) + + # check it with: matrix(my.cluster[[p]]$cluster, n.leadtimes, n.years) + if(fields.name == ECMWF_S4.name) my.cluster[[p]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30, trace=TRUE) + + save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + + } + + rm(pslmat) + gc() +} + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +# in the S4 case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!! 
+#if(fields.name == ECMWF_S4.name) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(WR.period <- 1:12){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- 
match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +## } + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thusdays in the period + } + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + gc() + + cluster.sequence <- my.cluster[[p]]$cluster + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + if(fields.name == rean.name || fields.name == ECMWF_S4.name){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] 
<- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } else { # for both S4 and the monthly system, the time order is always: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc. + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + + } + + #rm(cluster.sequence) + #gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + } + + if(fields.name == ECMWF_S4.name){ + # in this case, psleuFull is already in anomalies: + pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var1 ~ Var2 + Var3 ~ Var4 ~ Var5)) # Var1: ensemb.members, var2: start years, var3: lead months, var4: lat, var5: lon + #pslmat <- psleuFull + #dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat, n.pos.lon) # not working + pslwr1 <- pslmat[,wr1,,] + pslwr2 <- pslmat[,wr2,,] + pslwr3 <- pslmat[,wr3,,] + pslwr4 <- pslmat[,wr4,,] + rm(pslmat) + gc() + + pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + } + + + if(fields.name == rean.name){ + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), 
function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + } +} # close the for loop on 'p' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
+save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + 
cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its 
frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + 
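+ # Note on the layout technique used for this composition: par(fig=c(xmin, xmax,
+ # ymin, ymax), new=TRUE) draws the next plot inside the given fraction of the
+ # device, with all four coordinates normalized to [0,1] and (0,0) at the
+ # bottom-left corner of the png; new=TRUE prevents the device from being
+ # cleared, so the panels accumulate on the same sheet.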
freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + 
legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + diff --git a/old/weather_regimes_v25.R b/old/weather_regimes_v25.R new file mode 100644 index 0000000000000000000000000000000000000000..c54c88037169cb937c9449e66631e8284ca1316d --- /dev/null +++ b/old/weather_regimes_v25.R @@ -0,0 +1,907 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = 
paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from a reanalysis, put 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # set the resolution you want to interpolate the psl and var data to when loading them (i.e: set it to 0.75 to use the same resolution as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes for.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # Number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data to:
+n.lon <- 360/res
+n.lat <- (180/res)+1
+my.grid <- paste0('r',n.lon,'x',n.lat)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
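+# Quick sanity check of the target grid built above: with the default res = 0.75,
+# n.lon = 360/0.75 = 480 and n.lat = 180/0.75 + 1 = 241, so my.grid = "r480x241",
+# i.e. the same regular grid as ERA-Interim; in that case the bilinear
+# interpolation in Load() leaves ERA-Interim fields untouched and only regrids
+# coarser or finer datasets.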
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear')
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for one month at a time (since each month needs ~3 GB of memory)
+
+  start.month <- 1 # start month (1=January, 12=December)
+  n.members <- 15 # number of members of the forecast system to use
+
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month}
+  psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')
+  # note: this second call overwrites the previous one, keeping only the first start date with 216 daily lead times on the native grid:
+  psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+  ## # alternative method to load only 1 lead month:
+  ## start.month <- 1 # start date (1=January, 12=December)
+  ## n.members <- 15 # number of members of the forecast system to use
+  ## lead.month <- 1 # lead time in months (0= same as start date, 1= the month following the start date month, etc.)
+  ##
+  ## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month}
+  ## chosen.month <- start.month + lead.month # find which month we want to load data from
+  ## if(chosen.month > 12) chosen.month <- chosen.month - 12
+  ## leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start)
+  ## leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1
+  ## n.leadtimes <- leadtime.max - leadtime.min + 1
+  ## print(paste("First leadtime:",leadtime.min, " Last leadtime:", leadtime.max, " Num.leadtimes:",n.leadtimes))
+  ## psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+
+  ## psleuFull <- array(NA,c(1, n.members, n.years, n.leadtimes, n.pos.lat, n.pos.lon))
+  ## for (y in year.start:year.end){
+  ##   # compute the leadtimes min and max for the month to be loaded, to take into account if it belongs to a leap year:
+  ##   leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start)
+  ##   leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1
+  ##   n.leadtimes <- leadtime.max - leadtime.min + 1
+  ##   print(paste("First leadtime:",leadtime.min, " Last leadtime:", leadtime.max, " Num.leadtimes:",n.leadtimes))
+  ##   temp <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(y, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+  ##   psleuFull <- abind(psleuFull, temp$mod, along=3)
+  ## }
+
+
+  # immediately convert psl into daily anomalies (per lead time) to remove the drift!!!
+  pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull$mod - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+}
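+# Drift removal above: the climatology is computed once per lead time (averaging
+# over members and start years, i.e. over dims 2 and 3 of the c(dataset, member,
+# sdate, leadtime, lat, lon) array returned by Load) and then subtracted from
+# every member/start date, so anomalies are taken relative to the forecast's own
+# lead-time-dependent climate. A base-R sketch on a toy array (object names here
+# are hypothetical, used only for this illustration):
+# x    <- array(rnorm(1*2*3*4*5*6), c(1, 2, 3, 4, 5, 6)) # dataset,member,year,lead,lat,lon
+# clim <- apply(x, c(1,4,5,6), mean)                     # one climatology per lead time
+# anom <- sweep(x, c(1,4,5,6), clim)                     # subtract it without InsertDim
+# range(apply(anom, c(1,4,5,6), mean))                   # ~0 at every lead time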
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # define the dimensions before filling it; daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8)), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name) pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (i.e: winter)
+
+  # in the case of S4 we don't need to select anything because we have already loaded only one month of data!
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+    # if(fields.name == forecast.name && period == 13) pslmat[21:40,] <- NA
+  }
+
+  # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+    pslmat <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  # order psleuFull to use it as input of the cluster analysis:
+  if(fields.name == rean.name){
+    if(!lat.weighting) pslmat <- pslPeriod # if the latitude weighting was applied, pslmat already holds the weighted field
+    dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  if(fields.name == ECMWF_S4.name){
+    #pslmat <- melt(psleuFull[1,,,,,])
+    #temp <- acast(pslmat, Var2+Var3~Var4+Var5+Var1)
+    pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var2 + Var3 ~ Var1 + Var4 + Var5)) # Var1: ensemble members, Var2: start years, Var3: lead months, Var4: lat, Var5: lon
+    # note that the data order in pslmat[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+    gc()
+
+    # not working properly, probably there is a bug:
+    # pslmat <- psleuFull
+    # dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert the array into a matrix! Beware that it always puts the years before the leadtimes!
+    # i.e: dim(pslmat) <- c(1, 3*33, 15*n.pos.lat*n.pos.lon)
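+    # The melt/acast idiom above is what flattens the 5-d anomaly array (member,
+    # start year, lead month, lat, lon) into the 2-d matrix that kmeans() expects:
+    # one row per year/leadtime pair, one column per member/grid-point combination.
+    # A toy illustration with a small 3-d array (object names are hypothetical,
+    # chosen only for this sketch; reshape2 is already loaded above):
+    # a <- array(1:24, c(2, 3, 4))                    # e.g. member x year x leadtime
+    # m <- unname(acast(melt(a), Var2 + Var3 ~ Var1)) # -> 12 rows (year/lead pairs), 2 columns (members)
+    # dim(m)                                          # 12 2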
+  }
+
+  # compute the PCs or not:
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of every 9
+    pslcut <- pslmat[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep, based on their cumulative explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the first PCs that together explain at least the chosen fraction of variance (e.g. 80%)
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+    if(fields.name == rean.name) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30)
+
+    # check it with: matrix(my.cluster[[p]]$cluster, n.leadtimes, n.years)
+    if(fields.name == ECMWF_S4.name) my.cluster[[p]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30, trace=TRUE)
+
+  }
+
+  rm(pslmat)
+  gc()
+}
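+# A compact, self-contained sketch of the PCA + k-means pipeline used in the loop
+# above, on random data and with stats::prcomp instead of princomp (all object
+# names here are hypothetical, introduced only for this illustration):
+# X      <- matrix(rnorm(300*50), 300, 50)        # days x grid points
+# pca    <- prcomp(X, center = TRUE, scale. = FALSE)
+# cumvar <- cumsum(pca$sdev^2)/sum(pca$sdev^2)    # cumulative explained variance
+# npc    <- which(cumvar > 0.8)[1]                # smallest number of PCs above 80%
+# cl     <- kmeans(pca$x[, 1:npc], centers = 4, iter.max = 100, nstart = 30)
+# table(cl$cluster)                               # sizes of the 4 clusters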
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+} # close the reanalysis branch
+
+# in the S4 case, we could load only the data for one month at a time (since each month needs ~3 GB of memory),
+# but at present we ONLY use psl data!!!
+#if(fields.name == ECMWF_S4.name) {
+#  vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+#}
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12:
+## if(all(WR.period == 1:12)){
+## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  if(sequences){ # it works only for rean data!!!
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[p]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[p]]$cluster * sequ
+    rm(wrdiff, sequ)
+  }
+
+  gc()
+
+  if(!sequences) cluster.sequence <- my.cluster[[p]]$cluster # otherwise the filtered sequence computed above would be overwritten
+
+  # days belonging to each regime (when sequences=TRUE, only days inside sequences of at least 5 days of that regime):
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+    # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+    if(fields.name == rean.name || fields.name == ECMWF_S4.name){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+ } else { # for the monthly system, the time order is instead: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc.
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+
+ }
+
+ #rm(cluster.sequence)
+ #gc()
+
+ # convert the yearly counts to relative frequencies (they are rescaled to % only at plotting time):
+ wr1y <- wr1y/period.length[[p]]
+ wr2y <- wr2y/period.length[[p]]
+ wr3y <- wr3y/period.length[[p]]
+ wr4y <- wr4y/period.length[[p]]
+
+ if(fields.name == rean.name){
+ pslPeriod <- psleuFull[days.period[[p]],,]
+ pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+ pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+ rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+ gc()
+
+ pslwr1 <- pslPeriodAnom[wr1,,]
+ pslwr2 <- pslPeriodAnom[wr2,,]
+ pslwr3 <- pslPeriodAnom[wr3,,]
+ pslwr4 <- pslPeriodAnom[wr4,,]
+ rm(pslPeriodAnom)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ # in this case, psleuFull is already in anomalies:
+ pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var1 ~ Var2 + Var3 ~ Var4 ~ Var5)) # Var1: ensemble members, Var2: start years, Var3: lead months, Var4: lat, Var5: lon
+ #pslmat <- psleuFull
+ #dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat, n.pos.lon) # not working
+ pslwr1 <- pslmat[,wr1,,]
+ pslwr2 <- pslmat[,wr2,,]
+ pslwr3 <- pslmat[,wr3,,]
+ pslwr4 <- pslmat[,wr4,,]
+ rm(pslmat)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ gc()
+ }
+
+
+ if(fields.name == rean.name){
+ # similar selection as above, but for var instead of psl:
+ varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+ varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+ varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+ varPeriodAnom <- varPeriod - varPeriodClim2
+ rm(varPeriod, varPeriodClim, varPeriodClim2)
+ gc()
+
+ #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+ varPeriodAnom1 <- varPeriodAnom[wr1,,]
+ varPeriodAnom2 <- varPeriodAnom[wr2,,]
+ varPeriodAnom3 <- varPeriodAnom[wr3,,]
+ varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+ varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+ varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+ varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+ varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+ varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+ pvalue1 <- apply(varPeriodAnomBoth1, c(2,3),
function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + } +} # close the for loop on 'p' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
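+# The significance maps (pvalue1..pvalue4) saved above come from a per-grid-point
+# two-sample t-test comparing the anomalies of all days of the period against the
+# anomalies of the days assigned to one regime. A self-contained sketch on
+# synthetic data (array names here are hypothetical, used only for this
+# illustration; abind is loaded at the top of this script):
+# all.anom <- array(rnorm(90*4*5), c(90, 4, 5)) # 90 days on a 4x5 grid
+# wr.days  <- sample(1:90, 20)                  # days assigned to one regime
+# both     <- abind(all.anom, all.anom[wr.days,,], along = 1)
+# pval     <- apply(both, c(2,3), function(x) t.test(x[1:90], x[91:length(x)])$p.value)
+# dim(pval)                                     # a 4x5 matrix of p-values, one per grid point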
+save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + 
cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its 
frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + 
freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + 
legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + diff --git a/old/weather_regimes_v25.R~ b/old/weather_regimes_v25.R~ new file mode 100644 index 0000000000000000000000000000000000000000..44e3be5c864e7a248928d0233f6190aa5ef0cb93 --- /dev/null +++ b/old/weather_regimes_v25.R~ @@ -0,0 +1,907 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = 
paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load the pressure fields and var from a reanalysis, put 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # resolution to which the psl and var data are interpolated when loading them (e.g. set it to 0.75 to use the same resolution as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum % of variance that must be explained by the retained PCs (typically 0.8, i.e. 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+   days.period[[pp]] <- NA
+   for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+   days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning, which was introduced only so the concatenation above could run
+   # number of days belonging to that period from year.start to year.end:
+   n.days.period[[pp]] <- length(days.period[[pp]])
+   # Number of days belonging to that period in a single year:
+   period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
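+# Never-executed sketch of the indexing above, assuming the behaviour of the calendar
+# helpers in Rfunctions.R: for a monthly period p, days.period[[p]] holds the positions of
+# all days of that month inside the continuous daily series. In base R, for January:
+if(FALSE){
+  dates <- seq(as.Date(paste0(year.start,"-01-01")), as.Date(paste0(year.end,"-12-31")), by="day")
+  days.january <- which(format(dates, "%m") == "01") # positions of all January days
+  length(days.january)                               # 31*n.years, the analogue of n.days.period[[1]]
+}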
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+           latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for one month at a time (since each month needs ~3 GB of memory)
+
+  start.month <- 1 # start month (1=January, 12=December)
+  n.members <- 15 # number of members of the forecast system to use
+
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month}
+  psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')
+
+  psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs 
= NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + ## # alternative method to load only 1 lead month: + ## start.month <- 1 # start date (1=January, 12=December) + ## n.members <- 15 # number of members of the forecast system to use + ## lead.month <- 1 # lead time in months (0= same as start date, 1= the month following the start date month, etc.) + ## + ## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month} + ## chosen.month <- start.month + lead.month # find which month we want to load data + ## if(chosen.month > 12) chosen.month <- chosen.month - 12 + ## leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start) + ## leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1 + ## n.leadtimes <- leadtime.max - leadtime.min + 1 + ## print(paste("First leadtime:",leadtime.min, " Last leadtime:", leadtime.max, " Num.leadtimes:",n.leadtimes)) + ## psleuFull <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + + ## psleuFull <- array(NA,c(1, n.members, n.years, n.leadtimes, n.pos.lat, n.pos.lon)) + ## for (y in year.start:year.end){ + ## # compute the leadtimes min and max for the month to be loaded, to take into account if it belongs to a bisestile year: + ## leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, year.start) - n.days.in.a.month(chosen.month, year.start) + ## leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, year.start) - 1 + ## n.leadtimes <- leadtime.max - leadtime.min + 1 + ## print(paste("First leadtime:",leadtime.min, " Last leadtime:", leadtime.max, " Num.leadtimes:",n.leadtimes)) + ## temp <- Load(var = "psl", exp = list(list(path=fields)), obs = NULL, sdates=paste0(y, start.month.char,'01'), storefreq = 'daily', dimnames=list(member=member.name), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + ## pselFull <- abind(psleuFull, temp$mod, along=3) + ## } + + + # immediatly convert psl in daily anomalies to remove the drift!!! + pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull$mod - pslPeriodClim2 + rm(pslPeriodClim2) + gc() +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # in this case of S4 we don't need to select anything because we already loaded only 1 month of data! + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2]) + # if(fields.name == forecast.name && period == 13) pslmat[21:40,] <- NA + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + pslmat <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # order psleuFull to use it as input of the cluster analysis: + if(fields.name == rean.name){ + pslmat <- pslPeriod + dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + if(fields.name == ECMWF_S4.name){ + #pslmat <- melt(psleuFull[1,,,,,]) + #temp <- acast(pslmat, Var2+Var3~Var4+Var5+Var1) + pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var2 + Var3 ~ Var1 + Var4 + Var5)) # Var1: ensemb.members, var2: start years, var3: lead months, var4: lat, var5: lon + # note that the data order in pslmat[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + + # not working properly, probably there is a bug: + # pslmat <- psleuFull + # dim(pslmat) <- c(1, n.leadtimes*n.years, n.members*n.pos.lat*n.pos.lon) # convert array in a matrix! Beware that it always put the years before the leadtimes! 
+ # i.e: dim(pslmat) <- c(1, 3*33, 15*n.pos.lat*n.pos.lon) + } + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30) + + # check it with: matrix(my.cluster[[p]]$cluster, n.leadtimes, n.years) + if(fields.name == ECMWF_S4.name) my.cluster[[p]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30, trace=TRUE) + + } + + rm(pslmat) + gc() +} + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +# in the S4 case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!! 
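+# Toy illustration (never executed; synthetic data) of the PCA + k-means step above:
+# princomp() is run on the days x grid-points matrix, the leading PCs whose cumulative
+# explained variance exceeds variance.explained are kept, and kmeans() clusters their scores:
+if(FALSE){
+  set.seed(1)
+  toy <- matrix(rnorm(300*50), 300, 50)              # 300 "days" x 50 "grid points"
+  toy.PCA <- princomp(toy, cor=FALSE)
+  expl <- cumsum(toy.PCA$sdev^2/sum(toy.PCA$sdev^2)) # cumulative explained variance
+  n.keep <- which(expl > 0.8)[1]                     # same criterion as variance.explained
+  cl <- kmeans(toy.PCA$scores[,1:n.keep], centers=4, iter.max=100, nstart=30)
+  table(cl$cluster)                                  # sizes of the 4 regime clusters
+}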
+#if(fields.name == ECMWF_S4.name) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(WR.period <- 1:12){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- 
match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  if(fields.name == ECMWF_monthly.name){
+     my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+     #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+     days.period <- period.length <- list()
+     for(startdate in my.startdates.period){
+        days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+     }
+     period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+  }
+
+  if(sequences){ # it works only for rean data!!!
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[p]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+       sequ[i] <- TRUE
+       if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+       if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+       if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+       if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+       if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+       if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+       if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+       if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+       if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+       if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+       if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+       if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+       if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+       if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[p]]$cluster * sequ
+    rm(wrdiff, sequ)
+  }
+
+  gc()
+
+  if(!sequences) cluster.sequence <- my.cluster[[p]]$cluster # without the 5-day filter, use the raw daily cluster series (with it, cluster.sequence was already computed above)
+
+  # indices of the days assigned to each regime (only days inside sequences of at least 5 days if 'sequences' is TRUE):
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+    # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.,
+    if(fields.name == rean.name || fields.name == ECMWF_S4.name){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] 
<- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } else { # for both S4 and the monthly system, the time order is always: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc. + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + + } + + #rm(cluster.sequence) + #gc() + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + } + + if(fields.name == ECMWF_S4.name){ + # in this case, psleuFull is already in anomalies: + pslmat <- unname(acast(melt(psleuFull[1,,,,,]), Var1 ~ Var2 + Var3 ~ Var4 ~ Var5)) # Var1: ensemb.members, var2: start years, var3: lead months, var4: lat, var5: lon + #pslmat <- psleuFull + #dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat, n.pos.lon) # not working + pslwr1 <- pslmat[,wr1,,] + pslwr2 <- pslmat[,wr2,,] + pslwr3 <- pslmat[,wr3,,] + pslwr4 <- pslmat[,wr4,,] + rm(pslmat) + gc() + + pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + } + + + if(fields.name == rean.name){ + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), 
function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + } +} # close the for loop on 'p' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). 
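+# Miniature, never-executed version of the significance test computed above: at each grid
+# point the regime impact is flagged where a two-sample t-test between all daily anomalies
+# of the period and the anomalies of the regime days gives p < 0.05 (synthetic numbers):
+if(FALSE){
+  set.seed(2)
+  anom.all <- rnorm(900)            # anomalies of every day of the period, one grid point
+  anom.wr  <- rnorm(200, mean=0.5)  # anomalies of the days assigned to one regime
+  both <- c(anom.all, anom.wr)      # same concatenated layout as the abind()/apply() above
+  t.test(both[1:900], both[901:length(both)])$p.value
+}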
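+# Sketch (never executed) of the reordering applied below: each cluster's regime name is
+# looked up in 'orden' so that regime k of 'orden' is always drawn in row k of the sheet.
+# A compact equivalent of the per-period which(orden == ...) calls, with hypothetical names:
+if(FALSE){
+  orden.demo <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+  cluster.names.demo <- c("Blocking","NAO+","Atl.Ridge","NAO-") # names of clusters 1:4 in one period
+  match(cluster.names.demo, orden.demo)                         # 3 1 4 2: cluster 1 is drawn as regime 3
+}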
+save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- FALSE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + 
cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its 
frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + 
freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + 
legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + + + + + diff --git a/old/weather_regimes_v26.R b/old/weather_regimes_v26.R new file mode 100644 index 0000000000000000000000000000000000000000..fbdd09e0182cb5005b2b0829b97709ee5eef212c --- /dev/null +++ b/old/weather_regimes_v26.R @@ -0,0 +1,923 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = 
paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load the pressure fields and var from a reanalysis, put 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # resolution to which the psl and var data are interpolated when loading them (e.g. set it to 0.75 to use the same resolution as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum % of variance that must be explained by the retained PCs (typically 0.8, i.e. 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+ days.period[[pp]] <- NA
+ for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+ days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning, introduced only so the concatenation above works
+ # number of days belonging to that period from year.start to year.end:
+ n.days.period[[pp]] <- length(days.period[[pp]])
+ # Number of days belonging to that period in a single year:
+ period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0! 
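+
+# Illustrative sketch (not executed; wrapped in if(FALSE)): 'n.days.in.a.future.year()'
+# and 'pos.period()' come from Rfunctions.R and are assumed to return, respectively, the
+# day offset of year y relative to year.start and the within-year day positions of period
+# pp, so their sum gives absolute indices into the multi-year daily arrays used below.
+# A base-R equivalent of the year offset:
+if(FALSE){
+ demo.offset <- function(year.start, y) as.integer(as.Date(paste0(y,"-01-01")) - as.Date(paste0(year.start,"-01-01")))
+ demo.offset(1982, 1984) # 730: day indices of 1984 start at 731 in an array beginning on 1982-01-01
+}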
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: it only covers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but the values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+ psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for 1 month at a time (since each month needs ~3 GB of memory)
+ start.month <- 1 # start month (1=January, 12=December)
+ n.members <- 15 # number of members of the forecast system to use
+
+ if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month}
+
+ psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear')
+
+ # immediately convert psl to daily anomalies to remove the drift!
+ pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+ rm(pslPeriodClim)
+ gc()
+
+ psleuFull <- psleuFull$mod - pslPeriodClim2
+ rm(pslPeriodClim2)
+ gc()
+}
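+
+# Illustrative sketch (not executed; wrapped in if(FALSE)): the drift removal above
+# averages over the start-year dimension of $mod separately for each member, lead time
+# and grid point, then subtracts that climatology. A toy version on a small array:
+if(FALSE){
+ demo.mod <- array(rnorm(2*5*10), c(2,5,10)) # 2 members, 5 start years, 10 lead times
+ demo.clim <- apply(demo.mod, c(1,3), mean) # per-member, per-leadtime climatology
+ demo.anom <- sweep(demo.mod, c(1,3), demo.clim, "-") # anomalies: the drift is removed at each lead time
+ range(apply(demo.anom, c(1,3), mean)) # ~0 everywhere
+}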
+
+if(fields.name == ECMWF_monthly.name){
+ # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates)
+ psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.matrix <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+for(p in WR.period){
+ # Select only the data of the month/season we want:
+ if(fields.name == rean.name) pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+
+ # in the case of S4 we don't need to select anything because we already loaded only 1 month of data!
+
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+ days.period <- list() # get which days inside psleuFull belong to the chosen period
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+ }
+
+ # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+ # if(fields.name == forecast.name && period == 13) pslmat[21:40,] <- NA
+ }
+
+ # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+ if(lat.weighting){
+ lat.weighted <- cos(lat*pi/180)^0.5
+ lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+ pslmat <- pslPeriod * lat.weighted.array
+ rm(lat.weighted.array)
+ gc()
+ }
+
+ # select period data from psleuFull to use as input of the cluster analysis:
+ if(fields.name == rean.name){
+ pslmat <- pslPeriod
+ dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! 
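+ # Illustrative sketch (not executed; wrapped in if(FALSE)): 'dim<-' flattens the
+ # [day, lat, lon] array into a [day, lat*lon] matrix column by column, i.e. one row
+ # per day and one column per grid point, which is the layout kmeans() expects:
+ if(FALSE){
+  demo.arr <- array(1:24, c(4,3,2)) # 4 days, 3 lats, 2 lons
+  demo.mat <- demo.arr
+  dim(demo.mat) <- c(4, 3*2) # rows = days, cols = grid points
+  identical(demo.mat[2,5], demo.arr[2,2,2]) # TRUE: the column index runs over lat first
+ }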
+ rm(pslPeriod)
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ lead.month=1
+
+ chosen.month <- start.month + lead.month # find for which month we want to load data
+ if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+ # select only the data of one lead month:
+
+ for(y in year.start:year.end){
+ leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+ leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+ if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+ int.leadtimes <- leadtime.min:leadtime.max
+ n.leadtimes <- leadtime.max - leadtime.min + 1
+
+ if(y == year.start) {
+ pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+ } else {
+ pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3))
+ }
+ }
+
+ #pslmat <- unname(acast(melt(pslPeriod[1,,,,,]), Var2 + Var3 ~ Var1 + Var4 + Var5)) # old version, overwritten by the drop=FALSE call below
+ # Var2: ensemb.members, Var3: start years, Var4: lead months, Var5: lat, Var6: lon
+ pslmat <- unname(acast(melt(pslPeriod[1,,,,,, drop=FALSE]), Var3 + Var4 ~ Var2 + Var5 + Var6))
+ # note that the data order in pslmat[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+
+ #rm(pslPeriod)
+ gc()
+ }
+
+ # compute the PCs or not:
+ if(PCA){
+ my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+ pslcut <- pslmat[,my.seq]
+
+ my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+ tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on their cumulative explained variance
+ n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the PCs that together explain at least 80% of the variance
+ my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+ rm(pslcut)
+ } else { # 4 is the number of clusters:
+ if(fields.name == rean.name) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30)
+ if(fields.name == ECMWF_S4.name) {
+ my.cluster[[p]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30, trace=TRUE)
+ my.cluster.matrix[[p]] <- matrix(my.cluster[[p]]$cluster, n.leadtimes, n.years)
+ }
+ }
+
+ rm(pslmat)
+ gc()
+}
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
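+
+# Illustrative sketch (not executed; wrapped in if(FALSE)): how the number of PCs is
+# chosen from the cumulative explained variance and the scores are clustered, on toy data:
+if(FALSE){
+ demo.x <- matrix(rnorm(200*10), 200, 10)
+ demo.pca <- princomp(demo.x, cor=FALSE)
+ demo.cumvar <- cumsum(demo.pca$sdev^2) / sum(demo.pca$sdev^2)
+ demo.npcs <- which(demo.cumvar > 0.8)[1] # first number of PCs exceeding 80% of variance
+ demo.km <- kmeans(demo.pca$scores[, 1:demo.npcs], centers=4, iter.max=100, nstart=30)
+ table(demo.km$cluster) # cluster sizes
+}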
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+ vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+# in the S4 case, we can load only the data for 1 month at a time (since each month needs ~3 GB of memory)
+# but at present we ONLY use psl data!!!
+#if(fields.name == ECMWF_S4.name) {
+# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+#}
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+ sdates <- weekly.seq(forecast.year,mes,day)
+ vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+ for (startdate in 1:length(sdates)){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+ vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+ rm(temp,var)
+ gc()
+ }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WR time series to the months from 1 to 12:
+## if(WR.period == 1:12){
+## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+ if(sequences){ # it works only for rean data!!!
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+ # warning: the first 4 and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+ wrdiff <- diff(my.cluster[[p]]$cluster)
+
+ sequ <- rep(FALSE, n.days.period[[p]])
+ for(i in 5:(n.days.period[[p]]-5)){
+ sequ[i] <- TRUE
+ if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ } # close for loop on i
+
+ # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+ cluster.sequence <- my.cluster[[p]]$cluster * sequ
+ rm(wrdiff, sequ)
+ }
+
+ gc()
+
+ if(!sequences) cluster.sequence <- my.cluster[[p]]$cluster
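+
+ # Illustrative sketch (not executed; wrapped in if(FALSE)): a run-length version of the
+ # >=5-day filter above (without the original's special handling of the first/last 4 days),
+ # which is easier to verify than the pairwise-difference conditions:
+ if(FALSE){
+  demo.cl <- c(1,1,1,1,1,2,2,3,3,3,3,3,3,4)
+  demo.r <- rle(demo.cl)
+  demo.keep <- rep(demo.r$lengths >= 5, demo.r$lengths) # TRUE only inside runs of 5+ days
+  demo.cl * demo.keep # 0 marks the filtered-out days, as in cluster.sequence
+ }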
+
+ # yearly frequency of each regime:
+ if(fields.name == rean.name){
+ # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+ }
+
+ # convert the counts to frequencies (fractions; multiplied by 100 at plot time):
+ wr1y <- wr1y/period.length[[p]]
+ wr2y <- wr2y/period.length[[p]]
+ wr3y <- wr3y/period.length[[p]]
+ wr4y <- wr4y/period.length[[p]]
+
+ pslPeriod <- psleuFull[days.period[[p]],,]
+ pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+ pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+ rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+ gc()
+
+ pslwr1 <- pslPeriodAnom[wr1,,]
+ pslwr2 <- pslPeriodAnom[wr2,,]
+ pslwr3 <- pslPeriodAnom[wr3,,]
+ pslwr4 <- pslPeriodAnom[wr4,,]
+
+ rm(pslPeriodAnom)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ }
+
+
+ if(fields.name == ECMWF_S4.name){
+ cluster.sequence <- as.vector(my.cluster.matrix[[p]]) # cannot use my.cluster$cluster directly, because it includes leap days!
+
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ wr1y <- colSums(my.cluster.matrix[[p]] == 1)
+ wr2y <- colSums(my.cluster.matrix[[p]] == 2)
+ wr3y <- colSums(my.cluster.matrix[[p]] == 3)
+ wr4y <- colSums(my.cluster.matrix[[p]] == 4)
+
+ # convert the counts to frequencies (fractions; multiplied by 100 at plot time):
+ wr1y <- wr1y/n.leadtimes
+ wr2y <- wr2y/n.leadtimes
+ wr3y <- wr3y/n.leadtimes
+ wr4y <- wr4y/n.leadtimes
+
+ # in this case, we must use pslPeriod instead of psleuFull. Both are already anomalies:
+ pslmat <- unname(acast(melt(pslPeriod[1,,,,,]), Var1 ~ Var2 + Var3 ~ Var4 ~ Var5)) # Var1: ensemb.members, Var2: start years, Var3: lead months, Var4: lat, Var5: lon
+ #pslmat <- psleuFull
+ #dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat, n.pos.lon) # not working
+
+ pslwr1 <- pslmat[,wr1,,]
+ pslwr2 <- pslmat[,wr2,,]
+ pslwr3 <- pslmat[,wr3,,]
+ pslwr4 <- pslmat[,wr4,,]
+ rm(pslmat)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ gc()
+ }
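+
+ # Illustrative sketch (not executed; wrapped in if(FALSE)): the per-year counts above
+ # can also be computed with tabulate() on each year's slice of the cluster series:
+ if(FALSE){
+  demo.cl <- sample(1:4, 31*3, replace=TRUE) # 3 years x 31 days of cluster labels
+  demo.freq <- sapply(1:3, function(i) tabulate(demo.cl[(i-1)*31 + 1:31], nbins=4) / 31)
+  demo.freq # one column per year, one row per regime, as fractions
+ }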
+
+ # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+ #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period <- period.length <- list()
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+ }
+
+
+ # in the reanalysis case, we also compute the impact maps:
+ if(fields.name == rean.name){
+ # similar selection as above, but for var instead of psl:
+ varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+ varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+ varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+ varPeriodAnom <- varPeriod - varPeriodClim2
+ rm(varPeriod, varPeriodClim, varPeriodClim2)
+ gc()
+
+ #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+ varPeriodAnom1 <- varPeriodAnom[wr1,,]
+ varPeriodAnom2 <- varPeriodAnom[wr2,,]
+ varPeriodAnom3 <- varPeriodAnom[wr3,,]
+ varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+ varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+ varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+ varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+ varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+ varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+ pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth1, varPeriodAnom1)
+ gc()
+
+ varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+ pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth2, varPeriodAnom2)
+ gc()
+
+ varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+ pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth3, varPeriodAnom3)
+ gc()
+
+ varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+ pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+ rm(varPeriodAnom)
+ gc()
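+
+ # Illustrative sketch (not executed; wrapped in if(FALSE)): each pvalue map above does a
+ # two-sample t-test at every grid point, comparing all days of the period with the days
+ # assigned to one regime:
+ if(FALSE){
+  demo.all <- rnorm(90) # anomalies of all days at one grid point
+  demo.reg <- demo.all[1:20] + 0.5 # the regime's days, shifted here so the test has something to find
+  t.test(demo.all, demo.reg)$p.value # a small p-value marks a significant impact
+ }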
+
+ # save all the data necessary to redraw the graphs once we know the right regime:
+ save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+ rm(pvalue1,pvalue2,pvalue3,pvalue4)
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+ gc()
+ }
+
+ if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps)
+ save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ gc()
+ }
+} # close the for loop on 'p'
+
+
+# run it twice: first with ordering <- FALSE, to look at the cartography and identify visually the right regime names of the four clusters;
+# then a second time, to save the ordered cartography:
+ordering <- FALSE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+as.pdf <- TRUE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+
+# positions of the longitude values of Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose the order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+library(RColorBrewer) # for brewer.pal(), used below for the color scales
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+ my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+ # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean 
anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + 
cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, 
impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + 
par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + +# somma <- 
pslPeriod[1,,1,3,,]; giorni<-list(); giorni[[1]] <- c(4:5, 17:28); giorni[[2]] <-27:28; giorni[[4]] <- 1:21; giorni[[6]]<- 7:28; giorni[[7]]<-1:3; giorni[[9]]<-26:28; giorni[[11]]<-c(16:21,27:28); giorni[[12]]<-c(22:28); giorni[[13]]<-c(4,5); for(year in c(1,2,4,6,7,9,11,12,13)) for(day in giorni[[year]]) somma <- somma + pslPeriod[1,,year,day,,]
+
+
diff --git a/old/weather_regimes_v26.R~ b/old/weather_regimes_v26.R~
new file mode 100644
index 0000000000000000000000000000000000000000..b8f88aee0012412f9fa321fe6c1e24560d454855
--- /dev/null
+++ b/old/weather_regimes_v26.R~
@@ -0,0 +1,918 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+library(reshape2)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalyses from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # resolution to interpolate the psl and var data to when loading them (i.e: set to 0.75 to use the same res as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, i.e. 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the input data only have positive longitude values (0-360)! 
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+ days.period[[pp]] <- NA
+ for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+ days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning, introduced only so the concatenation above works
+ # number of days belonging to that period from year.start to year.end:
+ n.days.period[[pp]] <- length(days.period[[pp]])
+ # Number of days belonging to that period in a single year:
+ period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0! 
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: it only covers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but the values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+ psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for 1 month at a time (since each month needs ~3 GB of memory)
+ start.month <- 1 # start month (1=January, 12=December)
+ n.members <- 15 # number of members of the forecast system to use
+
+ if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month}
+
+ psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear')
+
+ # immediately convert psl to daily anomalies to remove the drift!
+ pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+ rm(pslPeriodClim)
+ gc()
+
+ psleuFull <- psleuFull$mod - pslPeriodClim2
+ rm(pslPeriodClim2)
+ gc()
+}
+
+if(fields.name == ECMWF_monthly.name){
+ # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates)
+ psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.matrix <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+
+for(p in WR.period){
+ # Select only the data of the month/season we want:
+ if(fields.name == rean.name) pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+
+ # in the case of S4 we don't need to select anything because we already loaded only 1 month of data!
+
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+ days.period <- list() # get which days inside psleuFull belong to the chosen period
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+ }
+
+ # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(pslmat[,1:2])
+ # if(fields.name == forecast.name && period == 13) pslmat[21:40,] <- NA
+ }
+
+ # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+ if(lat.weighting){
+ lat.weighted <- cos(lat*pi/180)^0.5
+ lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+ pslmat <- pslPeriod * lat.weighted.array
+ rm(lat.weighted.array)
+ gc()
+ }
+
+ # select period data from psleuFull to use as input of the cluster analysis:
+ if(fields.name == rean.name){
+ pslmat <- pslPeriod
+ dim(pslmat) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! 
+ rm(pslPeriod)
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ lead.month=1
+
+ chosen.month <- start.month + lead.month # find for which month we want to load data
+ if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+ # select only the data of one lead month:
+
+ for(y in year.start:year.end){
+ leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+ leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+ if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+ int.leadtimes <- leadtime.min:leadtime.max
+ n.leadtimes <- leadtime.max - leadtime.min + 1
+
+ if(y == year.start) {
+ pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+ } else {
+ pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3))
+ }
+ }
+
+ pslmat <- unname(acast(melt(pslPeriod[1,,,,,]), Var2 + Var3 ~ Var1 + Var4 + Var5)) # Var1: ensemb.members, Var2: start years, Var3: lead months, Var4: lat, Var5: lon
+ # note that the data order in pslmat[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+ #rm(pslPeriod)
+ gc()
+ }
+
+ # compute the PCs or not:
+ if(PCA){
+ my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+ pslcut <- pslmat[,my.seq]
+
+ my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+ tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on their cumulative explained variance
+ n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the PCs that together explain at least 80% of the variance
+ my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+ rm(pslcut)
+ } else { # 4 is the number of clusters:
+ if(fields.name == rean.name) my.cluster[[p]] <- kmeans(pslmat[which(!is.na(pslmat[,1])),], centers=4, iter.max=100, nstart=30)
+ if(fields.name == ECMWF_S4.name) {
+ my.cluster[[p]] <- kmeans(pslmat, centers=4, iter.max=100, nstart=30, trace=TRUE)
+ my.cluster.matrix[[p]] <- matrix(my.cluster[[p]]$cluster, n.leadtimes, n.years)
+ }
+ }
+
+ rm(pslmat)
+ gc()
+}
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +# in the S4 case, we can load only the data for 1 month at a time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!! +#if(fields.name == ECMWF_S4.name) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(identical(WR.period, 1:12)){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster;
my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +## } + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 and the last 5 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + gc() + + if(!sequences) cluster.sequence <- my.cluster[[p]]$cluster # when sequences=TRUE, keep the sequence-filtered series computed above + + + # yearly frequency of each regime: + if(fields.name == rean.name){ + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2,
..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert counts to frequencies: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + } + + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- as.vector(my.cluster.matrix[[p]]) # cannot use my.cluster$cluster directly, because it includes leap-year days! + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- colSums(my.cluster.matrix[[p]] == 1) + wr2y <- colSums(my.cluster.matrix[[p]] == 2) + wr3y <- colSums(my.cluster.matrix[[p]] == 3) + wr4y <- colSums(my.cluster.matrix[[p]] == 4) + + # convert counts to frequencies: + wr1y <- wr1y/n.leadtimes + wr2y <- wr2y/n.leadtimes + wr3y <- wr3y/n.leadtimes + wr4y <- wr4y/n.leadtimes + + # in this case, we must use pslPeriod instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(melt(pslPeriod[1,,,,,]), Var1 ~ Var2 + Var3 ~ Var4 ~ Var5)) # Var1: ensemble members, Var2: start years, Var3: lead months, Var4: lat, Var5: lon + #pslmat <- psleuFull + #dim(pslmat) <- c(1, n.members, n.leadtimes*n.years, n.pos.lat, n.pos.lon) # not working + + pslwr1 <- pslmat[,wr1,,] + pslwr2 <- pslmat[,wr2,,] + pslwr3 <- pslmat[,wr3,,] + pslwr4 <- pslmat[,wr4,,] + rm(pslmat) + gc() + + pslwr1mean <- apply(pslwr1,c(3,4),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(3,4),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(3,4),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(3,4),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + } + + # for the monthly system, the time order is always: year1day1, year2day1, ...
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.: + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + days.period[[p]] <- NA # seed with NA to be able to append inside the loop below (removed afterwards) + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + days.period[[p]] <- days.period[[p]][-1] # remove the NA seed + period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period + + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + + + # in the reanalysis case, we also compute the impact maps: + if(fields.name == rean.name){ + # similar selection as above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean,
varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + } +} # close the for loop on 'p' + + +# run it twice: the first time with ordering <- FALSE, to inspect the maps and identify visually the right regime names of the four clusters; +# the second time, to save the ordered cartography: +ordering <- FALSE # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file +as.pdf <- TRUE # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons + +# positions of the lon values covering Europe only (without the Atlantic Ocean and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid for both wind speed and temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean
anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + 
cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for each of the 4 regimes: their pressure fields, their impact on var and their frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos,
impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + 
par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + + + + + + +# somma <- 
pslPeriod[1,,1,3,,]; giorni<-list(); giorni[[1]] <- c(4:5, 17:28); giorni[[2]] <-27:28; giorni[[4]] <- 1:21; giorni[[6]]<- 7:28; giorni[[7]]<-1:3; giorni[[9]]<-26:28; giorni[[11]]<-c(16:21,27:28); giorni[[12]]<-c(22:28); giorni[[13]]<-c(4,5); for(year in c(1,2,4,6,7,9,11,12,13)) for(day in giorni[[year]]) somma <- somma + pslPeriod[1,,year,day,,] + + + diff --git a/old/weather_regimes_v27.R b/old/weather_regimes_v27.R new file mode 100644 index 0000000000000000000000000000000000000000..ae99315878a8c8a302113637e26d8487af6c873d --- /dev/null +++ b/old/weather_regimes_v27.R @@ -0,0 +1,958 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +res <- 0.75 # set the resolution you want to interpolate the psl and var data to when loading it (i.e: set it to 0.75 to use the same res as ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days belonging to the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
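+# A small worked example (toy value, not executed) of the 0-360 longitude convention
+# required for lon.min above: -85.5 deg (on the -180..180 convention) denotes the
+# same meridian as 274.5 deg E, the value assigned to lon.min.
+if(FALSE){
+  lon.west <- -85.5   # hypothetical western bound expressed in -180..180 degrees
+  lon.west %% 360     # 274.5
+}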
+ +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years +} + +# Create a regular lat/lon grid to interpolate the data: +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
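+# Worked example (not executed) of the grid label built above: with the default
+# res = 0.75 the target grid is 'r480x241', i.e. 480 longitudes and 241 latitudes
+# (both poles included), matching the ERA-Interim resolution.
+if(FALSE){
+  res.example <- 0.75
+  paste0('r', 360/res.example, 'x', (180/res.example)+1)   # "r480x241"
+}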
+ +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged): +if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1) + +#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for 1 month at a time (since each month needs ~3 GB of memory) + start.month <- 1 # start month (1=January, 12=December) + n.members <- 15 # number of members of the forecast system to use + + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month} + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + # immediately convert
psl in daily anomalies to remove the drift! + pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull$mod - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + # daily anomalies computed with the 10-day leadtime window AFTER each day (psleuFull already holds the daily anomalies computed above): + pslPeriodClim10 <- array(NA, c(1,15,32,206,55,131)) # hard-coded dims: 1 dataset, 15 members, 32 start years, 206 leadtimes, 55 lat, 131 lon + for(year in 1:32){ + for(lead in 1:206){ + pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 # 10 consecutive days, from lead to lead+9 + } + } + pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,32),2,15) + + psleuFull10 <- psleuFull[1,,,1:206,,,drop=F] - pslPeriodClim10mean2 + #psleuFull10mean <- apply(psleuFull10, c(1,3,4,5,6),mean,na.rm=T) + psleuFull <- psleuFull10 + rm(psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, pslPeriodClim10mean) + gc() + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- c() + +WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn +p=1 + +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name) pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # in the case of S4 we don't need to select anything because we already loaded only 1 month of data!
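+  # Minimal sketch (toy sizes, not executed) of the day selection above: indexing
+  # the first dimension of a [day, lat, lon] array with the day indices of a
+  # period keeps the two spatial dimensions intact.
+  if(FALSE){
+    x <- array(rnorm(10*2*3), c(10, 2, 3))   # toy data: 10 days, 2 lat, 3 lon
+    sel <- x[1:5,,]                          # hypothetical 5-day period
+    dim(sel)                                 # 5 2 3
+  }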
+ + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + days.period[[p]] <- NA # seed with NA to be able to append inside the loop below (removed afterwards) + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + days.period[[p]] <- days.period[[p]][-1] # remove the NA seed + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + if(fields.name == rean.name){ + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod) + } + + if(fields.name == ECMWF_S4.name){ + lead.month=1 + + chosen.month <- start.month + lead.month # find which month we want to load data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # select only the data of one lead month: + + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl.
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the PCs that explain at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() +} + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +# in the S4 case, we can load only the data for 1 month at a time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!!
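+# Sketch (not executed) of the yearly fill pattern used for vareuFull above: each
+# year's daily block occupies consecutive rows of the big array, so the rows of
+# year y start right after the days of all previous years. The loop below rebuilds
+# the equivalent offsets with the n.days.in.a.year() helper from Rfunctions.R;
+# seq.days.in.a.future.year() is assumed to return exactly these row indices.
+if(FALSE){
+  offset <- 0
+  for(y in year.start:year.end){
+    rows <- offset + seq_len(n.days.in.a.year(y))   # rows filled for year y
+    offset <- offset + n.days.in.a.year(y)
+  }
+}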
+#if(fields.name == ECMWF_S4.name) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(identical(WR.period, 1:12)){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <-
match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +## } + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 and the last 5 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + gc() + + # yearly frequency of each regime: + if(fields.name == rean.name){ + if(!sequences) cluster.sequence <- my.cluster[[p]]$cluster # when sequences=TRUE, keep the sequence-filtered series computed above + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert counts to frequencies: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() +
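+    # Sketch (toy data, not executed) of the composite step that follows: averaging
+    # the anomaly fields over the days assigned to one cluster gives that regime's
+    # centroid map.
+    if(FALSE){
+      anom <- array(rnorm(6*2*2), c(6, 2, 2))                  # 6 days, 2x2 grid
+      labels <- c(1, 2, 1, 1, 2, 1)                            # toy cluster labels
+      centroid1 <- apply(anom[labels == 1,,], c(2, 3), mean)   # regime-1 map
+    }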
pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + } + + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- as.vector(my.cluster.array[[p]]) # cannot use my.cluster$cluster directly, because it includes leap-year days! + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- colSums(my.cluster.array[[p]] == 1) + wr2y <- colSums(my.cluster.array[[p]] == 2) + wr3y <- colSums(my.cluster.array[[p]] == 3) + wr4y <- colSums(my.cluster.array[[p]] == 4) + + # convert counts to frequencies: + wr1y <- wr1y/n.leadtimes + wr2y <- wr2y/n.leadtimes + wr3y <- wr3y/n.leadtimes + wr4y <- wr4y/n.leadtimes + + # in this case, we must use pslPeriod instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[,wr1,,, drop=F] + pslwr2 <- pslmat[,wr2,,, drop=F] + pslwr3 <- pslmat[,wr3,,, drop=F] + pslwr4 <- pslmat[,wr4,,, drop=F] + + pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + n=0 + for(sy in 1:32) { + for(ld in 1:28){ + if(my.cluster.array[[p]][ld,sy,2] == 1){ # member 2, the same member plotted from pslPeriod below + n=n+1 + if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} else {temp <- temp + pslPeriod[1,2,sy,ld,,]} # avoid adding the first selected day twice + } + } + } + PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + + pslwr1mean <- apply(pslwr1, c(3,4), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(3,4), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(3,4), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(3,4), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ...
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2]  # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period)  # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){  # loop over the years to fill the yearly counts
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+
+  # in the reanalysis case, we also measure the impact maps:
+  if(fields.name == rean.name){
+    # similar selection as above, but for var instead of psl:
+    varPeriod <- vareuFull[days.period[[p]],,]  # select only var data during the chosen period
+
+    varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+    varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+    varPeriodAnom <- varPeriod - varPeriodClim2
+    rm(varPeriod, varPeriodClim, varPeriodClim2)
+    gc()
+
+    #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)  # plot the climatology of the period
+    varPeriodAnom1 <- varPeriodAnom[wr1,,]
+    varPeriodAnom2 <- varPeriodAnom[wr2,,]
+    varPeriodAnom3 <- varPeriodAnom[wr3,,]
+    varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+    varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+    varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+    varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+    varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+    varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+    pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth1, varPeriodAnom1)
+    gc()
+
+    varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+    pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth2, varPeriodAnom2)
+    gc()
+
+    varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+    pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth3, varPeriodAnom3)
+    gc()
+
+    varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+    pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+    rm(varPeriodAnom)
+    gc()
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+    rm(pvalue1,pvalue2,pvalue3,pvalue4)
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+    gc()
+  }
+
+  if(fields.name == ECMWF_S4.name){  # save all the data necessary to draw the graphs (but not the impact maps)
+    save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    gc()
+  }
+}  # close the for loop on 'p'
+
+
+# run it twice: the first time with ordering <- FALSE, to look only at the cartography and visually identify the right regime names of the four clusters;
+# the second time, to save the ordered cartography:
+ordering <- F  # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+as.pdf <- T  # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+save.names <- TRUE  # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+
+
+# positions of the lon values covering Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon))  # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon)))  # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300)  # % Mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300)  # % Mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300)  # % Mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200)  # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2))  # % Mean anomaly of a WR
+  # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2))  # % Mean anomaly of a WR
+
+  my.brks2 <- my.brks  #c(seq(-20,20,2))  # % Mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks  #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1)  # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for wind speed anomalies and temperature anomalies):
+#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20)  # % Mean anomaly of a WR
+#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20)  # % Mean anomaly of a WR
+#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean 
anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + 
cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, 
impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + 
par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + diff --git 
a/old/weather_regimes_v27.R~ b/old/weather_regimes_v27.R~
new file mode 100644
index 0000000000000000000000000000000000000000..7a9a3e89369ec676cbf9844689cad4283ac9bda9
--- /dev/null
+++ b/old/weather_regimes_v27.R~
@@ -0,0 +1,957 @@
+library(s2dverification)  # for the function Load()
+library(abind)
+library(Kendall)
+library(reshape2)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')  # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes"  # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint  #JRA55 #ERAint  # choose one of the two above reanalyses from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast  #rean #forecast  # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl"  #"g500"  # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP"  # "Z500"  # the name to show in the maps
+
+year.start <- 1982  #1981 #1994 #1979 #1981
+year.end <- 2013  #2013 #2010
+
+member.name <- "ensemble"  # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75  # set the resolution you want to interpolate the psl and var data to when loading it (i.e: set to 0.75 to use the same res as ERA-Interim)
+             # interpolation method is BILINEAR.
+PCA <- FALSE  # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8  # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE  # set it to TRUE to weight psl data on latitude before applying the cluster analysis
+sequences=FALSE  # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81  #81 #80 #70
+lat.min <- 27  #30 #20 #30
+lon.max <- 45  #36 #30 #40
+lon.min <- 274.5  #274.5 #270 #280  # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1  # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. 
+#startdate.shift <- 1  # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data
+                       # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1  # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1  # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2  # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields  # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name  #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1]  # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # Number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999)  #+ ifelse(period==13 | period==2, 1, 0)  # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))  # Notice that the regular grid is always centered at lat=0 and lon=0! 
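[Editor's note: as a quick sanity check of the grid label built above, with the default res of 0.75 the string passed to Load() comes out as 'r480x241' — the same formulas, just evaluated:

    res <- 0.75
    n.lon.grid <- 360/res                       # 480 longitudes
    n.lat.grid <- (180/res) + 1                 # 241 latitudes (both poles included)
    paste0('r', n.lon.grid, 'x', n.lat.grid)    # "r480x241"
]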
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat')  #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1]  # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1)  # select the last latitude of the domain over the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon))  # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat]  # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon]  # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min]  # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){  # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){  # in this case, we can load only the data for one month at a time (since each month needs ~3 GB of memory)
+  start.month <- 1  # start month (1=January, 12=December)
+  n.members <- 15  # number of members of the forecast system to use
+
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)  #, grid=my.grid, method='bilinear')
+
+  # immediately convert psl into daily anomalies to remove the drift!
+  pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull$mod - pslPeriodClim2  # psleuFull now holds the anomaly array
+  rm(pslPeriodClim2)
+  gc()
+
+  # daily anomalies computed with the 10-day leadtime window AFTER each day:
+  pslPeriodClim10 <- array(NA, c(1,15,32,206,55,131))
+  for(year in 1:32){
+    for(lead in 1:206){
+      # mean of the 10 consecutive days starting at 'lead':
+      pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+    }
+  }
+  pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,32),2,15)
+
+  psleuFull10 <- psleuFull[1,,,1:206,,,drop=F] - pslPeriodClim10mean2
+  #psleuFull10mean <- apply(psleuFull10, c(1,3,4,5,6),mean,na.rm=T)
+  psleuFull <- psleuFull10
+  rm(psleuFull10, pslPeriodClim10, pslPeriodClim10mean, pslPeriodClim10mean2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81  # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100  # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.matrix <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+WR.period <- 1  #1:12 #13:16  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+p=1
+
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name) pslPeriod <- psleuFull[days.period[[p]],,]  # select only days in the chosen period (i.e: winter)
+
+  # in the case of S4 we don't need to select anything because we have already loaded only one month of data!
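[Editor's note: the drift-removal step earlier in this block subtracts, at every lead time, the climatology over start years (and members), so that each forecast day becomes an anomaly. A self-contained toy sketch of that broadcast subtraction — hypothetical 2-D dimensions, not the script's 6-D psl array:

    x <- array(rnorm(5 * 216), c(5, 216))   # toy field: 5 start years x 216 lead times
    clim <- colMeans(x)                     # lead-time-dependent climatology (the drift)
    anom <- sweep(x, 2, clim)               # subtract it from every year
    round(colMeans(anom), 10)[1:3]          # ~0 at every lead time: drift removed
]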
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)  # get which startdates belong to the chosen period
+
+    days.period <- list()  # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+  # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+    psl.kmeans <- pslPeriod * lat.weighted.array
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  # select period data from psleuFull to use it as input of the cluster analysis:
+  if(fields.name == rean.name){
+    psl.kmeans <- pslPeriod
+    dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon)  # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  if(fields.name == ECMWF_S4.name){
+    lead.month=1
+
+    chosen.month <- start.month + lead.month  # find which month we want to load data for
+    if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+    # select only the data of one lead month:
+
+    for(y in year.start:year.end){
+      leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+      leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+      if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1  # remove the 29th of February to have the same n. of elements for all years
+      int.leadtimes <- leadtime.min:leadtime.max
+      n.leadtimes <- leadtime.max - leadtime.min + 1
+
+      if(y == year.start) {
+        pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+      } else {
+        pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3))
+      }
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+    psl.melted <- melt(pslPeriod[1,1:2,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    psl.kmeans <- unname(acast(psl.melted, StartYear + LeadDay ~ Lat + Lon + Member))
+
+    #rm(pslPeriod)
+    gc()
+  }
+
+  # compute the PCs or not:
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9)  # select only 1 point out of 9
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50)  # check how many PCs to keep based on the sum of their expl. variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1)  # select only the PCs that explain at least 80% of variance
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30)  # 4 is the number of clusters
+    rm(pslcut)
+  } else {  # 4 is the number of clusters:
+    if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30)
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE)
+      my.cluster.matrix[[p]] <- matrix(my.cluster[[p]]$cluster, n.leadtimes, n.years)
+
+      plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+}
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+var.num <- 1  # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas")  # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature")  # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC")  # unit of measure of var (for drawing color scales)
+
+# Load var data:
+if(fields.name == rean.name){  # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+# in the S4 case, we can load only the data for one month at a time (since each month needs ~3 GB of memory),
+# but at present we ONLY use psl data!!! 
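[Editor's note: the PCA branch above keeps the smallest set of components whose cumulative explained variance exceeds variance.explained, then clusters the scores. A self-contained toy version of that selection — random data, not the script's psl matrix:

    set.seed(1)
    toy <- matrix(rnorm(300 * 20), 300, 20)       # 300 'days' x 20 'grid points'
    pc <- princomp(toy, cor = FALSE)
    expl <- cumsum(pc$sdev^2) / sum(pc$sdev^2)    # cumulative explained variance
    n.pcs <- which(expl > 0.8)[1]                 # first PC count exceeding 80%
    cl <- kmeans(pc$scores[, 1:n.pcs], centers = 4, iter.max = 100, nstart = 30)
    table(cl$cluster)                             # sizes of the 4 clusters
]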
+#if(fields.name == ECMWF_S4.name) {
+#  vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+#}
+
+if(fields.name == ECMWF_monthly.name){  # Load 6-hourly var data in the monthly forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WR time series to the months from 1 to 12:
+## if(identical(WR.period, 1:12)){
+##   my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+##   my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+##   ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(sequences){  # it works only for reanalysis data!!!
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster[[p]]$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+      if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    }  # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster[[p]]$cluster * sequ
+    rm(wrdiff, sequ)
+  }
+
+  gc()
+
+  # yearly frequency of each regime:
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/period.length[[p]]
+    wr2y <- wr2y/period.length[[p]]
+    wr3y <- wr3y/period.length[[p]]
+    wr4y <- wr4y/period.length[[p]]
+
+    pslPeriod <- psleuFull[days.period[[p]],,]
+    pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+    pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+    rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+    gc()
+
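[Editor's note: because the daily series is stored year block after year block with a fixed period length, the per-year counts in the loop above can also be written as a reshape plus colSums, mirroring the colSums form used in the S4 branch below. A sketch under the same equal-length assumption, with toy data (leap days assumed already excluded):

    period.len <- 31; n.yrs <- 3
    cl.seq <- sample(1:4, period.len * n.yrs, replace = TRUE)  # toy daily cluster numbers
    cl.mat <- matrix(cl.seq, nrow = period.len)                # days-in-period x years
    wr1y <- colSums(cl.mat == 1) / period.len                  # yearly frequency of regime 1
]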
pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + } + + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- as.vector(my.cluster.matrix[[p]]) # cannot use my.cluster$cluster directly, because it includes bisestile days! + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- colSums(my.cluster.matrix[[p]] == 1) + wr2y <- colSums(my.cluster.matrix[[p]] == 2) + wr3y <- colSums(my.cluster.matrix[[p]] == 3) + wr4y <- colSums(my.cluster.matrix[[p]] == 4) + + # convert to frequencies in %: + wr1y <- wr1y/n.leadtimes + wr2y <- wr2y/n.leadtimes + wr3y <- wr3y/n.leadtimes + wr4y <- wr4y/n.leadtimes + + # in this case, must use pslPeriod instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[,wr1,,, drop=F] + pslwr2 <- pslmat[,wr2,,, drop=F] + pslwr3 <- pslmat[,wr3,,, drop=F] + pslwr4 <- pslmat[,wr4,,, drop=F] + + pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + n=0 + for(sy in 1:32) { + for(ld in 1:28){ + if(my.cluster.matrix[[p]][ld,sy] == 1){ + n=n+1 + if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + temp <- temp + pslPeriod[1,2,sy,ld,,] + } + } + } + PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + + pslwr1mean <- apply(pslwr1, c(3,4), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(3,4), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(3,4), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(3,4), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2]  # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period)  # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){  # loop over the years to fill the yearly counts
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+
+  # in the reanalysis case, we also measure the impact maps:
+  if(fields.name == rean.name){
+    # similar selection as above, but for var instead of psl:
+    varPeriod <- vareuFull[days.period[[p]],,]  # select only var data during the chosen period
+
+    varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+    varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+    varPeriodAnom <- varPeriod - varPeriodClim2
+    rm(varPeriod, varPeriodClim, varPeriodClim2)
+    gc()
+
+    #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5)  # plot the climatology of the period
+    varPeriodAnom1 <- varPeriodAnom[wr1,,]
+    varPeriodAnom2 <- varPeriodAnom[wr2,,]
+    varPeriodAnom3 <- varPeriodAnom[wr3,,]
+    varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+    varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+    varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+    varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+    varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+    varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+    pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth1, varPeriodAnom1)
+    gc()
+
+    varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+    pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth2, varPeriodAnom2)
+    gc()
+
+    varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+    pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth3, varPeriodAnom3)
+    gc()
+
+    varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+    pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+    rm(varPeriodAnom)
+    gc()
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, 
varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + } +} # close the for loop on 'p' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +as.pdf <- T # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file + + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean 
anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + 
cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, 
impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + 
par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + diff --git 
a/old/weather_regimes_v28.R b/old/weather_regimes_v28.R new file mode 100644 index 0000000000000000000000000000000000000000..315db158c58a16b3ec717de01697fd4b3ec114b6 --- /dev/null +++ b/old/weather_regimes_v28.R @@ -0,0 +1,953 @@
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+library(reshape2)
+library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from a reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # resolution to which the psl and var data are interpolated when loading them (i.e: set to 0.75 to use the same res as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # Number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
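+
+# --- illustration (hypothetical check, not part of the original script) ---
+# What my.grid evaluates to for the default res = 0.75: 360/0.75 = 480 longitudes
+# and 180/0.75 + 1 = 241 latitudes, i.e. 'r480x241' (the ERA-Interim-like grid).
+# A minimal standalone check of this arithmetic, assuming only base R:
+res.check <- 0.75
+grid.check <- paste0('r', 360/res.check, 'x', (180/res.check)+1)
+stopifnot(identical(grid.check, 'r480x241'))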
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only 1 month at a time (since each month needs ~3 GB of memory)
+  start.month <- 1 # start month (1=January, 12=December)
+  n.members <- 15 # number of members of the forecast system to use
+  n.leadtimes <- 216 # number of daily lead times loaded below (leadtimemax=216)
+
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char=start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear')
+
+  # immediately convert psl into daily anomalies to remove the drift!
+  pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull$mod - pslPeriodClim2 # from here on psleuFull is a plain anomaly array (it has no $mod element anymore)
+  rm(pslPeriodClim2)
+  gc()
+
+  # daily anomalies computed with the 10-day leadtime window including the 9 days AFTER each day:
+  pslPeriodClim10 <- array(NA, c(1,n.members,n.years,n.leadtimes-9,n.pos.lat,n.pos.lon))
+  for(year in 1:n.years){
+    for(lead in 1:(n.leadtimes-9)){
+      pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+    }
+  }
+  pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean, 2, n.years), 2, n.members) # replicate the window climatology along the year and member dimensions
+
+  psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 # keep only the lead times for which a full 10-day window exists
+  #psleuFull10mean <- apply(psleuFull10, c(1,3,4,5,6),mean,na.rm=T)
+  psleuFull <- psleuFull10
+  rm(psleuFull10, pslPeriodClim10, pslPeriodClim10mean, pslPeriodClim10mean2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c() # number of retained PCs for each period
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+p=1
+
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){ pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+
+  # in the case of S4 we don't need to select anything because we already loaded only 1 month of data!
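+
+  # --- illustration (hypothetical, not part of the original script) ---
+  # The S4 drift removal above subtracts, at each lead time, the mean of its own
+  # 10-day forward window averaged over all start years. The same windowing on a
+  # toy years-x-leadtimes matrix (standalone sketch; uncomment to run):
+  #toy.anom <- matrix(rnorm(30*216), nrow=30, ncol=216) # 30 years x 216 lead days
+  #fwd.mean <- function(x, w=10) sapply(1:(length(x)-w+1), function(t) mean(x[t:(t+w-1)]))
+  #clim10 <- t(apply(toy.anom, 1, fwd.mean)) # 30 x 207 means of the forward windows
+  #clim <- colMeans(clim10) # climatology of each window across the 30 years
+  #toy.drift.free <- toy.anom[, 1:207] - matrix(clim, 30, 207, byrow=TRUE)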
+ + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + if(fields.name == rean.name){ + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + if(fields.name == ECMWF_S4.name){ + lead.month=2 + + chosen.month <- start.month + lead.month # find which month we want to load data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # select only the data of one lead month: + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() +} + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +# in the S4 case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!! 
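+
+# --- illustration (hypothetical, not part of the original script) ---
+# How the PCA truncation in the cluster analysis above works: keep the first PCs
+# whose cumulative explained variance exceeds variance.explained, then run k-means
+# on the retained scores only. Standalone toy example (uncomment to run):
+#set.seed(1)
+#X <- matrix(rnorm(200*50), 200, 50) # 200 "days" x 50 "grid points"
+#pca <- princomp(X, cor=FALSE)
+#expl <- cumsum(pca$sdev^2 / sum(pca$sdev^2)) # cumulative fraction of explained variance
+#k <- head(which(expl > 0.8), 1) # same criterion used for n.pcs[p] above
+#cl <- kmeans(pca$scores[, 1:k], centers=4, iter.max=100, nstart=30)
+#table(cl$cluster) # sizes of the 4 clusters (candidate weather regimes)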
+#if(fields.name == ECMWF_S4.name) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(WR.period <- 1:12){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- 
match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster[[p]]$cluster) # wrdiff[i] != 0 marks a regime change between day i and day i+1
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        # day i is discarded whenever two regime changes enclose a run shorter than 5 days that contains it:
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster[[p]]$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/period.length[[p]]
+    wr2y <- wr2y/period.length[[p]]
+    wr3y <- wr3y/period.length[[p]]
+    wr4y <- wr4y/period.length[[p]]
+
+    pslPeriod <- psleuFull[days.period[[p]],,]
+    pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+    pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+    rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+    gc()
+
+    pslwr1 <- 
pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- 
apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+} # close the for loop on 'p'
+
+
+# run it twice: once with ordering <- FALSE, to look only at the cartography and find visually the right regime names of the four clusters;
+# and a second time, to save the ordered cartography:
+ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+as.pdf <- T # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+
+
+# positions of the lon values covering Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+
+  my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for Wind speed anomalies 
and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this 
case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, 
year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 
0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + diff --git a/old/weather_regimes_v28.R~ b/old/weather_regimes_v28.R~ new file mode 100644 index 0000000000000000000000000000000000000000..df0b0a4d4bd81c4686caf1c594b46f270348ec28 --- /dev/null +++ b/old/weather_regimes_v28.R~ @@ -0,0 +1,957 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- 
'/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # set the resolution you want to interpolate the psl and var data to when loading it (e.g. set it to 0.75 to use the same resolution as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on.
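+# The dataset paths above use Load()'s $PLACEHOLDER$ wildcards. As a rough illustration of
+# the convention (a hedged sketch; 'expand.path' is a hypothetical helper, not part of
+# s2dverification), this is how one template is expected to expand for a single file:
+expand.path <- function(template, var.name, year, month){
+  out <- gsub("$VAR_NAME$", var.name, template, fixed=TRUE)   # variable name, e.g. "psl"
+  out <- gsub("$STORE_FREQ$", "daily", out, fixed=TRUE)       # storage frequency
+  out <- gsub("$YEAR$", sprintf("%04d", year), out, fixed=TRUE)
+  gsub("$MONTH$", sprintf("%02d", month), out, fixed=TRUE)
+}
+# e.g. expand.path(ERAint, "psl", 1999, 1) gives
+# '/esnas/reconstructions/ecmwf/eraint/daily_mean/psl_f6h/psl_199901.nc'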
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # Number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
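+# Hedged illustration of what the 'days.period' loop above builds: for each period p
+# (1-12 = months, 13-16 = seasons, 17 = whole year), the positions of all days of that
+# period inside the continuous daily series from year.start to year.end. A self-contained
+# base-R equivalent for the monthly case, without the Rfunctions.R calendar helpers:
+if(FALSE){ # inert example, not executed
+  days.in.month.index <- function(year.start, year.end, month){
+    dates <- seq(as.Date(paste0(year.start,"-01-01")), as.Date(paste0(year.end,"-12-31")), by="day")
+    which(as.integer(format(dates, "%m")) == month)
+  }
+  days.in.month.index(1982, 1983, 1) # returns 1:31 and 366:396 (1982 is not a leap year)
+}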
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for one month at a time (since each month needs ~3 GB of memory)
+  start.month <- 1 # start month (1=January, 12=December)
+  n.members <- 15 # number of members of the forecast system to use
+
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear')
+
+  # immediately convert psl into daily anomalies to remove the drift!
+  pslPeriodClim <- apply(psleuFull$mod, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull$mod - pslPeriodClim2 # from here on, psleuFull holds the anomalies, not the Load() output
+  rm(pslPeriodClim2)
+  gc()
+
+  # daily anomalies computed with the 10-day leadtime window including the 9 days AFTER each day:
+  n.leadtimes <- 216 # number of daily leadtimes loaded above
+  pslPeriodClim10 <- array(NA, c(1,n.members,n.years,n.leadtimes-9,n.pos.lat,n.pos.lon))
+  for(year in 1:n.years){
+    for(lead in 1:(n.leadtimes-9)){
+      pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+    }
+  }
+  pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean, 2, n.years), 2, n.members)
+
+  psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 # the lead dimension must match pslPeriodClim10mean2
+  #psleuFull10mean <- apply(psleuFull10, c(1,3,4,5,6),mean,na.rm=T)
+  psleuFull <- psleuFull10
+  rm(psleuFull10, pslPeriodClim10, pslPeriodClim10mean, pslPeriodClim10mean2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime) # the dimensions must be known before allocating psleuFull
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+p=1
+
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){ pslPeriod <- psleuFull[days.period[[p]],,] } # select only days in the chosen period (e.g. winter)
+
+  # in this case of S4 we don't need to select anything because we already loaded only 1 month of data!
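+  # A sketch of the 10-day forward-window climatology used above for the S4 drift
+  # correction (inert example, not executed): stats::filter() with a one-sided kernel
+  # gives at position i+9 the mean of x[i..i+9], i.e. each lead time averaged with the
+  # 9 following days, so it reproduces the explicit double loop for one member/year/grid point:
+  if(FALSE){
+    forward.mean10 <- function(x) as.numeric(stats::filter(x, rep(1/10, 10), sides=1))[10:length(x)]
+    # apply(psleuFull, c(1,2,3,5,6), forward.mean10) matches pslPeriodClim10 up to
+    # apply() moving the lead-time dimension first (reorder with aperm() if needed).
+  }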
+ + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + if(fields.name == rean.name){ + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + if(fields.name == ECMWF_S4.name){ + lead.month=0 + + chosen.month <- start.month + lead.month # find which month we want to load data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # select only the data of one lead month: + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + + #my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() +} + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } + +# in the S4 case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) +# but at present we ONLY use psl data!!! 
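+# Self-contained sketch of the PCA + k-means route used above, on toy data rather than
+# the script's arrays (inert example, not executed): keep the first PCs explaining at
+# least 'variance.explained' of the total variance, then cluster the scores into 4 regimes.
+if(FALSE){
+  set.seed(1)
+  toy <- matrix(rnorm(500*20), nrow=500)   # 500 "days" x 20 "grid points"
+  toy.pca <- princomp(toy, cor=FALSE)
+  cumvar <- cumsum(toy.pca$sdev^2)/sum(toy.pca$sdev^2)
+  npc <- which(cumvar >= 0.8)[1]           # same criterion as variance.explained above
+  toy.cl <- kmeans(toy.pca$scores[, 1:npc], centers=4, iter.max=100, nstart=30)
+  table(toy.cl$cluster)                    # number of toy "days" assigned to each regime
+}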
+#if(fields.name == ECMWF_S4.name) { +# vareuFull <- Load(var = var.name[var.num], exp = list(fields), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), storefreq = 'daily', dimnames=list(member="number"), nmember=n.members, leadtimemin=leadtime.min, leadtimemax=leadtime.max, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) +#} + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +## # if you want to study WRs at monhly level but using their seasonal time series, you have to copy the WRs time series to the months from 1 to 12: +## if(WR.period <- 1:12){ +## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list() +## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list() + +## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)] +## ss <- 
match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)] +## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)] +## } + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + gc() + + # yearly frequency of each regime: + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + 
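+      # build the four regime composites: average the daily psl anomalies over the days
+      # assigned to each cluster; pslwr1mean..pslwr4mean are the centroid maps drawn
+      # in the final composition below.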
pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + } + + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){ # loop over the years to fill the yearly regime frequencies
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+
+  # in the reanalysis case, we also compute the impact maps:
+  if(fields.name == rean.name){
+    # similar selection as above, but for var instead of psl:
+    varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+    varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+    varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+    varPeriodAnom <- varPeriod - varPeriodClim2
+    rm(varPeriod, varPeriodClim, varPeriodClim2)
+    gc()
+
+    #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+    varPeriodAnom1 <- varPeriodAnom[wr1,,]
+    varPeriodAnom2 <- varPeriodAnom[wr2,,]
+    varPeriodAnom3 <- varPeriodAnom[wr3,,]
+    varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+    varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+    varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+    varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+    varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+    varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+    pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth1, varPeriodAnom1)
+    gc()
+
+    varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+    pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth2, varPeriodAnom2)
+    gc()
+
+    varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+    pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth3, varPeriodAnom3)
+    gc()
+
+    varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+    pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+    rm(varPeriodAnom)
+    gc()
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean,
varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + } + + if(fields.name == ECMWF_S4.name){ # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + } +} # close the for loop on 'p' + + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- F # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +as.pdf <- T # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- TRUE # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file + + +# position of long values of Europe only (without the Atlantic Sea and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean 
anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + 
cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, 
impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=100 + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + 
par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + diff --git 
a/old/weather_regimes_v29.R b/old/weather_regimes_v29.R new file mode 100644 index 0000000000000000000000000000000000000000..ba6a382c8e15e510c4c7359eb5aab8b02d9a394d --- /dev/null +++ b/old/weather_regimes_v29.R @@ -0,0 +1,1121 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +lat.weighting=FALSE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitud! + +var.num <- 1 # Choose a variable. 
1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Only for Reanalysis:: +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn + +# only for Seasonal forecasts: +n.members <- 15 # number of members of the forecast system to use +startM <- 1 # start month (1=January, 12=December) +leadM <- 0 # select only the data of one lead month: + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two below variables: +script.arg <- as.integer(commandArgs(TRUE)[1]) +if(length(script.arg) == 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] +} else { + start.month <- startM + lead.month <- leadM +} + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +} + +# Create a regular lat/lon grid to interpolate the data: +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- 
+#domain$lat <- c(rev(seq(0,90,res)),seq(-res,-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
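+
+# Illustration (a commented-out sketch, not part of the computation): with a toy 1-degree grid,
+# lon.min=274.5 and lon.max=45, the wrap-around above selects lon 0..45 plus lon 275..359,
+# i.e. a box crossing the Greenwich meridian:
+## toy.lon <- 0:359
+## toy.pos.lon.max <- tail(which(45 - toy.lon >= 0),1) # index of lon=45
+## toy.pos.lon.min <- which(toy.lon - 274.5 >= 0)[1]   # index of lon=275
+## toy.pos.lon <- c(1:toy.pos.lon.max, toy.pos.lon.min:length(toy.lon))
+## range(toy.lon[toy.pos.lon]) # returns 0 and 359: both sides of Greenwich are included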
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+ psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
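+
+# Note on the data layout (a commented-out sketch, with illustrative dimensions only): before the
+# cluster analysis further below, each daily lat-lon field is flattened into one row of a
+# days x gridpoints matrix, which is what dim(psl.kmeans) <- c(...) does inside the 'p' loop:
+## toy <- array(rnorm(6*4*5), c(6,4,5)) # 6 days, 4 latitudes, 5 longitudes
+## dim(toy) <- c(6, 4*5)                # now 6 rows (days) x 20 columns (grid points)
+## # each row is then one observation for kmeans()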
+if(fields.name == ECMWF_S4.name){ # in this case, we can load the data of only one month at a time (since each month needs ~3 GB of memory)
+ if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+ psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+ # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates)
+
+ psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+#p=1 # for the debug
+for(p in WR.period){
+ # Select only the data of the month/season we want:
+ if(fields.name == rean.name){
+ pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g.: winter)
+
+ # weight the pressure fields by latitude (better applied to anomalies than to absolute values!):
+ if(lat.weighting){
+ lat.weighted <- cos(lat*pi/180)^0.5
+ lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+ pslPeriod <- pslPeriod * lat.weighted.array # overwrite pslPeriod with the weighted fields
+ rm(lat.weighted.array)
+ gc()
+ }
+
+ # select period data from psleuFull to use it as input of the cluster analysis:
+ psl.kmeans <- pslPeriod
+ dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+ rm(pslPeriod)
+ }
+
+ # in the case of S4 we don't need to select anything because we already loaded only 1 month of data!
+ if(fields.name == ECMWF_S4.name){ + + ## climatology plots: + + ## load eraint data: + ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + ## my.years <- year.start:year.end + ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + ## # check S4 lon and lat: + ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + ## # draw climatologies: + ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + ## s4.data <- data.frame(s4=true.climate[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + ## s4.pred <- predict(s4.loess) + ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## col.monthly <- "brown" + ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess <- loess(era ~ day, era.data, span=0.50) + ## era.pred <- predict(era.loess) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + ## era.data2 <- data.frame(era=era[1,1,1:216], 
day=1:216) + ## era.loess2 <- loess(era ~ day, era.data2, span=0.3) + ## era.pred2 <- predict(era.loess2) + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess3 <- loess(era ~ day, era.data3, span=0.35) + ## era.pred3 <- predict(era.loess3) + + ## col.monthly.erai <- "turquoise3" + ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + + + ## # anomalias: + ## point.type <- "l" + ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,] + ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3) + ## lines(rep(0,216)) + + ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d + ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred + ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4)) + ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim + ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3) + + + ## pos.last.year <- dim(var$obs)[3] + ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216] + ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216] + ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216] + ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2) + + ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216] + ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216] + + ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4)) + ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim + ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2) + + ## # Nube plot: + ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## grid(nx=10,lwd=2) + ## lines(era.anom.pred2,type="l", 
col="purple", lty=1, pch=19, lwd=2) + ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2) + ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2) + + + ## # fit alfa parameter for each grid point: + ## i=1 + ## j=1 + ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T) + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## k=0; error <- c() + ## for (alfa in seq(0.25,0.50,0.01)){ + ## k=k+1 + ## era.loess <- loess(era ~ day, era.data, span=alfa) + ## era.pred <- predict(era.loess) + ## error[k] <- mean(abs(era.pred - era[1,1,1:216])) + ## } + + # convert psl in daily anomalies: + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + + ## # if you didn't convert psl data in anomalies until now, you can convert psl data in monthly anomalies: + ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T) + ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members) + + ## pslPeriod <- pslPeriod - pslPeriodClim2 + ## rm(pslPeriodClim, pslPeriodClim2) + ## gc() + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... 
, year1day31, year2day1, year2day2, ... , year2day28
+ psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+
+ #rm(pslPeriod)
+ gc()
+ }
+
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+ days.period[[p]] <- c() # get which days inside psleuFull belong to the chosen period
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+ }
+
+ # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+ # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+ }
+
+
+ # compute the PCs or not:
+ if(PCA){
+ my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+ pslcut <- psl.kmeans[,my.seq]
+
+ my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+ tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+ n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the first PCs explaining at least the chosen % of variance (typically 80%)
+ my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+ rm(pslcut)
+ } else { # 4 is the number of clusters:
+ if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30)
+ if(fields.name == ECMWF_S4.name) {
+ my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE)
+ my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+ #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+ }
+ }
+
+ rm(psl.kmeans)
+ gc()
+}
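+
+# Illustration (a commented-out sketch on toy data) of how n.pcs is selected above: keep the
+# smallest number of leading PCs whose cumulative explained variance exceeds variance.explained:
+## toy <- matrix(rnorm(100*10), 100, 10)
+## toy.pca <- princomp(toy, cor=FALSE)
+## toy.var <- cumsum(toy.pca$sdev^2 / sum(toy.pca$sdev^2))
+## which(toy.var > 0.8)[1] # same criterion as n.pcs[p], with variance.explained=0.8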
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+ vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+if(fields.name == ECMWF_S4.name) {
+ if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+ my.years <- c(1982:1993) #year.start:year.end
+ vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+}
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+ sdates <- weekly.seq(forecast.year,mes,day)
+ vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+ for (startdate in 1:length(sdates)){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+ vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+ rm(temp,var)
+ gc()
+ }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WR time series to the months from 1 to 12:
+## if(identical(WR.period, 1:12)){
+## my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+## my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+## ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+## ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
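+
+# Note on the 'sequences' filter used inside the loop below: the chain of if() tests flags the days
+# belonging to runs of at least 5 consecutive days with the same regime. A more compact equivalent
+# based on rle() would be (commented-out sketch; unlike the loop below, it also evaluates the first
+# and last 4 days of the period):
+## seq5 <- function(cl) { r <- rle(cl); rep(r$lengths >= 5, r$lengths) }
+## seq5(c(1,1,1,1,1,2,2,3)) # TRUE for the five 1s, FALSE for the other days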
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+ if(fields.name == rean.name){
+ cluster.sequence <- my.cluster[[p]]$cluster
+
+ if(sequences){ # it works only for reanalysis data!
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+ # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+ wrdiff <- diff(my.cluster[[p]]$cluster)
+
+ sequ <- rep(FALSE, n.days.period[[p]])
+ for(i in 5:(n.days.period[[p]]-5)){
+ sequ[i] <- TRUE
+ if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ } # close for loop on i
+
+ # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+ cluster.sequence <- my.cluster[[p]]$cluster * sequ
+ rm(wrdiff, sequ)
+ }
+
+ # indices of the days assigned to each regime (after the optional filter above, only days inside sequences of at least 5 days of that regime remain):
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ # yearly frequency of each regime:
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+ }
+
+ # convert to frequencies in %:
+ wr1y <- wr1y/period.length[[p]]
+ wr2y <- wr2y/period.length[[p]]
+ wr3y <- wr3y/period.length[[p]]
+ wr4y <- wr4y/period.length[[p]]
+
+ pslPeriod <- psleuFull[days.period[[p]],,]
+ pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+ pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+ rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+ gc()
+
+ pslwr1 <- pslPeriodAnom[wr1,,]
+ pslwr2 <- pslPeriodAnom[wr2,,]
+ pslwr3 <- pslPeriodAnom[wr3,,]
+ pslwr4 <- pslPeriodAnom[wr4,,]
+
+ rm(pslPeriodAnom)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
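+
+ # The significance dots of the impact maps below come from a two-sample t-test applied at each
+ # grid point, comparing all the daily anomalies of the period with those of one regime.
+ # Standalone illustration on toy data (commented out):
+ ## all.anom <- rnorm(300)          # toy: all daily anomalies at one grid point
+ ## wr.anom <- rnorm(80, mean=0.5)  # toy: anomalies of the days assigned to one regime
+ ## t.test(all.anom, wr.anom)$p.value # a small p-value marks a significant regime impact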
+
+ # in the reanalysis case, we also compute the impact maps:
+
+ # similar selection as above, but for var instead of psl:
+ varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+ varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+ varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+ varPeriodAnom <- varPeriod - varPeriodClim2
+ rm(varPeriod, varPeriodClim, varPeriodClim2)
+ gc()
+
+ #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+ varPeriodAnom1 <- varPeriodAnom[wr1,,]
+ varPeriodAnom2 <- varPeriodAnom[wr2,,]
+ varPeriodAnom3 <- varPeriodAnom[wr3,,]
+ varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+ varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+ varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+ varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+ varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+ varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+ pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth1, varPeriodAnom1)
+ gc()
+
+ varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+ pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth2, varPeriodAnom2)
+ gc()
+
+ varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+ pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth3, varPeriodAnom3)
+ gc()
+
+ varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+ pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+ rm(varPeriodAnom)
+ gc()
+
+ # save all the data necessary to redraw the graphs when we know the right regime:
+ save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+ rm(pvalue1,pvalue2,pvalue3,pvalue4)
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+ gc()
+
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]])
+
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum)
+ wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum)
+ wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum)
+ wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum)
+
+ # convert to frequencies in %:
+ wr1y <- wr1y/(n.leadtimes*n.members)
+ wr2y <- wr2y/(n.leadtimes*n.members)
+ wr3y <- wr3y/(n.leadtimes*n.members)
+ wr4y <-
wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+ #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period[[p]] <- c() # get which days inside psleuFull belong to the chosen period
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+ }
+
+} # close the for loop on 'p'
+
+
+# run it twice: the first time with ordering <- FALSE, to look only at the cartography and visually find the right regime names of the four clusters;
+# the second time, to save the ordered cartography:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+as.pdf <- F # FALSE: save results as one .png file for each season; TRUE: save only 1 .pdf file with all seasons
+save.names <- T # if TRUE, also save the cluster names (i.e.: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+
+# positions of the lon values of Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+ #my.brks <- c(-300,-199:200,300) # mean anomaly of a WR
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+ #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for Wind speed anomalies and
Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, 
we load the cluster names from the file already saved: + load(paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, 
year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 
0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+ # Legend 2:
+ if(fields.name == rean.name){
+ legend2.xpos <- 0.48
+ legend2.width <- 0.25
+ legend2.cex <- 1.8
+ my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+ }
+
+ if(!as.pdf) dev.off() # for saving 4 png
+
+} # close for loop on 'p'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
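+
+# Usage note (an assumption based on the commandArgs() block near the top of this script):
+# run it either with no arguments, taking startM and leadM from the settings above, or with
+# two arguments, e.g.:
+#
+# Rscript weather_regimes_v29.R 5 2 # start month = May, lead month = 2 (S4 case)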
'/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +lat.weighting=FALSE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitud! + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# only for Seasonal forecasts: +start.month <- 1 # start month (1=January, 12=December) +lead.month <- 0 # select only the data of one lead month: +n.members <- 15 # number of members of the forecast system to use + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. 
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +} + +# Create a regular lat/lon grid to interpolate the data: +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0! 
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only one month at a time (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn
+p=1 # for debugging
+
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+    pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g., winter)
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+      pslPeriod <- pslPeriod * lat.weighted.array # overwrite pslPeriod, so the weighted field is what enters the cluster analysis below
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # use the period data as input of the cluster analysis:
+    psl.kmeans <- pslPeriod
+    dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  # in the S4 case we don't need to select anything, because we already loaded only one month of data!
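+  # Before entering the S4 branch below, a commented-out toy check (hypothetical names 'a'
+  # and 'm') of the dim()<- trick used in the reanalysis branch above: reassigning the dim
+  # attribute flattens the [day, lat, lon] array into a [day, gridpoint] matrix without
+  # copying or reordering the days, which is the shape kmeans() expects (one row per day):
+  # a <- array(1:24, c(2,3,4))       # 2 days, 3 latitudes, 4 longitudes
+  # m <- a; dim(m) <- c(2, 3*4)      # now a 2x12 matrix
+  # all(m[1,] == as.vector(a[1,,]))  # TRUE: row 1 still holds all grid points of day 1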
+ if(fields.name == ECMWF_S4.name){ + + # load eraint data: + ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + my.years <- 1982:1993 #year.start:year.end + var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + # check S4 lon and lat: + #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + # draw climatologies: + drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + s4.data <- data.frame(s4=true.climate[1,], day=1:216) + s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + s4.pred <- predict(s4.loess) + lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + col.monthly <- "brown" + true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + era.data <- data.frame(era=era[1,1,1:216], day=1:216) + era.loess <- loess(era ~ day, era.data, span=0.50) + era.pred <- predict(era.loess) + lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + col.monthly.erai <- "turquoise3" + era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T) + lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + era2 <- 
apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T)
+  lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+  era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T)
+  lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+  era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T)
+  lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+  era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T)
+  lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+  era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T)
+  lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+  era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T)
+  lines(c(rep(NA,180),rep(era7,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+
+
+  # anomalies:
+  point.type <- "l"
+  pos.last.year <- dim(psleuFull)[3] # index of the last start year loaded; it must be defined before its first use just below
+  true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,]
+  plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3)
+  lines(rep(0,216))
+
+  true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d
+  lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3)
+
+  s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred
+  lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3)
+
+  s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4))
+  s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim
+  lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3)
+
+
+  pos.last.year <- dim(var$obs)[3]
+  era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216]
+  plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2)
+  lines(rep(0,216))
+
+  era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216]
+  lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2)
+
+  era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216]
+  lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2)
+
+  era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4))
+  era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim
+  lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2)
+
+
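+  # A commented-out toy comparison (hypothetical names) of the two smoothers used for the
+  # climatologies above: stats::filter() with rep(1/5,5) is a centred 5-day running mean
+  # (NA at the edges), while loess() fits a local regression whose smoothness is set by
+  # 'span' (span=0.50 above means each local fit uses half of the 216 leadtimes):
+  # x <- sin(seq(0, 2*pi, length.out=216)) + rnorm(216, sd=0.2)
+  # x.5d <- stats::filter(x, rep(1/5,5), sides=2)                          # running mean
+  # x.lo <- predict(loess(x ~ day, data.frame(x=x, day=1:216), span=0.50)) # loess curve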
+  # immediately convert psl to daily anomalies, to remove the drift!
+  pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years), 2, n.members)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+  # convert psl to daily anomalies computed with a 10-day leadtime window including the 9 days AFTER each day:
+  n.leadtimes <- dim(psleuFull)[4]
+  pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9, n.pos.lat, n.pos.lon))
+  for(year in 1:n.years){
+    for(lead in 1:(n.leadtimes-9)){
+      pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+    }
+  }
+
+  pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members)
+
+  psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2
+
+  psleuFull <- psleuFull10
+  rm(psleuFull10, pslPeriodClim10, pslPeriodClim10mean, pslPeriodClim10mean2) # remove only objects that still exist at this point
+  gc()
+
+  chosen.month <- start.month + lead.month # find the month we want to select data from
+  if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+  for(y in year.start:year.end){
+    leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+    leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+    if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same number of elements for all years
+    int.leadtimes <- leadtime.min:leadtime.max
+    n.leadtimes <- leadtime.max - leadtime.min + 1
+
+    if(y == year.start) {
+      pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+    } else {
+      pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) # requires the 'abind' package
+    }
+  }
+
+
+  # if you didn't convert the psl data to anomalies before this point, you can convert it to monthly anomalies here:
+  pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members)
+
+  pslPeriod <- pslPeriod - pslPeriodClim2
+  rm(pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ..., year2day28
+  psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) # requires the 'reshape2' package
+  psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+
+  #rm(pslPeriod)
+  gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- c() # initialize the element before appending to it below
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+
+  # compute the PCs or not:
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the PCs that together explain at least the chosen fraction of variance (typically 80%)
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+    if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30)
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE)
+      my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+}
+
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name) {
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  my.years <- c(1982:1993) #year.start:year.end
+  vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+}
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+## # if you want to study WRs at monthly level but using their seasonal time series, you have to copy the WR time series to the months from 1 to 12:
+## if(identical(WR.period, 1:12)){
+##   my.cluster[[1]] <- my.cluster[[2]] <- my.cluster[[3]] <- my.cluster[[4]] <- my.cluster[[5]] <- my.cluster[[6]] <- list()
+##   my.cluster[[7]] <- my.cluster[[8]] <- my.cluster[[9]] <- my.cluster[[10]] <- my.cluster[[11]] <- my.cluster[[12]] <- list()
+
+##   ss <- match(days.period[[13]],days.period[[1]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[1]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[13]],days.period[[2]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[2]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[13]],days.period[[12]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[13]]$cluster; my.cluster[[12]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[14]],days.period[[3]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[3]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[14]],days.period[[4]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[4]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[14]],days.period[[5]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[14]]$cluster; my.cluster[[5]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[15]],days.period[[6]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[6]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[15]],days.period[[7]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[7]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[15]],days.period[[8]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[15]]$cluster; my.cluster[[8]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[16]],days.period[[9]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[9]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[16]],days.period[[10]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[10]]$cluster <- qq[!is.na(qq)]
+##   ss <- match(days.period[[16]],days.period[[11]]); ss[which(!is.na(ss))] <- 1; qq <- ss*my.cluster[[16]]$cluster; my.cluster[[11]]$cluster <- qq[!is.na(qq)]
+## }
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster[[p]]$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster[[p]]$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/period.length[[p]]
+    wr2y <- wr2y/period.length[[p]]
+    wr3y <- wr3y/period.length[[p]]
+    wr4y <- wr4y/period.length[[p]]
+
+    pslPeriod <- psleuFull[days.period[[p]],,]
+    pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+    pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+    rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+    gc()
+
+    pslwr1 <- pslPeriodAnom[wr1,,]
+    pslwr2 <- pslPeriodAnom[wr2,,]
+    pslwr3 <- pslPeriodAnom[wr3,,]
+    pslwr4 <- pslPeriodAnom[wr4,,]
+
+    rm(pslPeriodAnom)
+    gc()
+
+    pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+    pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+    pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+    pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+    # in the reanalysis case, we also compute the impact maps:
+
+    # similar selection as above, but for var instead of psl:
+    varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+    varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+    varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+    varPeriodAnom <- varPeriod - varPeriodClim2
+    rm(varPeriod, varPeriodClim, varPeriodClim2)
+    gc()
+
+    #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+    varPeriodAnom1 <- varPeriodAnom[wr1,,]
+    varPeriodAnom2 <- varPeriodAnom[wr2,,]
+    varPeriodAnom3 <- varPeriodAnom[wr3,,]
+    varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+    varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+    varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+    varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+    varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+    varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+    pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth1, varPeriodAnom1)
+    gc()
+
+    varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+    pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth2, varPeriodAnom2)
+    gc()
+
+    varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+    pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth3, varPeriodAnom3)
+    gc()
+
+    varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+    pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+    rm(varPeriodAnom)
+    gc()
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+    rm(pvalue1,pvalue2,pvalue3,pvalue4)
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+    gc()
+
+  }
+
+  if(fields.name == ECMWF_S4.name){
+    cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]])
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum)
+    wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum)
+    wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum)
+    wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum)
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/(n.leadtimes*n.members)
+    wr2y <- wr2y/(n.leadtimes*n.members)
+    wr3y <- wr3y/(n.leadtimes*n.members)
+    wr4y <- wr4y/(n.leadtimes*n.members)
+
+    # in this case, we
must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name) {
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    days.period[[p]] <- c() # initialize the element before appending to it below
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+} # close the for loop on 'p'
+
+
+# run it twice: the first time, with ordering <- FALSE, to look only at the cartography and find visually the right regime names of the four clusters;
+# the second time, to save the ordered cartography:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+as.pdf <- F # FALSE: save results as one .png file for each season; TRUE: save only 1 .pdf file with all seasons
+save.names <- T # if TRUE, also save the cluster names (i.e., the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+
+# positions of the longitude values of Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+  # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+
+  my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors (requires the 'RColorBrewer' package)
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the impact maps (valid both for Wind speed anomalies and
Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) + +for(p in WR.period){ + load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_ClusterNames.RData")) + + } else { # in this case, 
we load the cluster names from the file already saved:
+    load(paste0(workdir,"/",fields.name,"_ClusterNames.RData"))
+    cluster1.name <- cluster1.name.period[p]
+    cluster2.name <- cluster2.name.period[p]
+    cluster3.name <- cluster3.name.period[p]
+    cluster4.name <- cluster4.name.period[p]
+  }
+
+  # assign to each cluster its regime:
+  if(ordering == FALSE){
+    cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4
+  } else {
+    cluster1 <- which(orden == cluster1.name)
+    cluster2 <- which(orden == cluster2.name)
+    cluster3 <- which(orden == cluster3.name)
+    cluster4 <- which(orden == cluster4.name)
+  }
+
+  assign(paste0("map",cluster1), pslwr1mean)
+  assign(paste0("map",cluster2), pslwr2mean)
+  assign(paste0("map",cluster3), pslwr3mean)
+  assign(paste0("map",cluster4), pslwr4mean)
+
+  assign(paste0("fre",cluster1), wr1y)
+  assign(paste0("fre",cluster2), wr2y)
+  assign(paste0("fre",cluster3), wr3y)
+  assign(paste0("fre",cluster4), wr4y)
+
+  if(fields.name == rean.name){
+    assign(paste0("imp",cluster1), varPeriodAnom1mean)
+    assign(paste0("imp",cluster2), varPeriodAnom2mean)
+    assign(paste0("imp",cluster3), varPeriodAnom3mean)
+    assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+    assign(paste0("sig",cluster1), pvalue1)
+    assign(paste0("sig",cluster2), pvalue2)
+    assign(paste0("sig",cluster3), pvalue3)
+    assign(paste0("sig",cluster4), pvalue4)
+  }
+
+
+  # Visualize the composition with the 3 graphs for each of the 4 regimes: its pressure field, its impact on var, and its frequency series:
+  if(!as.pdf) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700)
+
+  plot.new()
+
+  # Sheet title:
+  par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+  mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, 
year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 
0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  if(fields.name == rean.name){
+    legend2.xpos <- 0.48
+    legend2.width <- 0.25
+    legend2.cex <- 1.8
+    my.subset2 <- match(values.to.plot2, my.brks.var)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  }
+
+  if(!as.pdf) dev.off() # close the .png file of the current season
+
+} # close for loop on 'p'
+
+if(as.pdf) dev.off() # close the single .pdf file with all seasons
+
diff --git a/old/weather_regimes_v3.R b/old/weather_regimes_v3.R
new file mode 100644
index 0000000000000000000000000000000000000000..30ab708d7b34011cf5289dd15a3d052ace8b7400
--- /dev/null
+++ b/old/weather_regimes_v3.R
@@ -0,0 +1,209 @@
+
+library(s2dverification) # for the function Load()
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# available reanalyses:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps
+
+year.start <- 1979
+year.end <- 2013
+
+rean <- ERAint # choose one of the two reanalyses above
+rean.name <- ERAint.name
+
+var.name <- "sfcWind" #"tas" #"sfcWind" # name of the 'predictand' variable of the chosen reanalysis
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because Z500 has only positive values of longitude!
+
+#############################################################################################################################
+
+# load only 1 day of z500 to detect the minimum and maximum lat and lon values to identify only the North Atlantic:
+domain <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+z500euFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+## for (y in year.start:year.end){
+##   var <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat')
+##   z500eu <- var$obs[,,,,pos.lat,pos.lon]
+##   z500euFull[seq.days.in.a.future.year(year.start, y),,] <- z500eu
+##   rm(z500eu)
+##   gc()
+## }
+
+for (y in year.start:year.end){
+  var <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+  z500euFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+
+lat <- domain$lat[pos.lat] # take them from 'domain', since 'var' was removed inside the loop above
+lon <- domain$lon[pos.lon]
+
+period = 13 # (winter)
+
+days.period <- NA
+for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+n.days.period <- length(days.period)
+
+z500 <- z500euFull[days.period,,] # select only days in the chosen period (i.e., winter)
+
+z500mat <- z500
+
+dim(z500mat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
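+# Commented-out note on the 1-in-9 subsampling used below: since the columns of z500mat
+# follow R's column-major order (latitude varies fastest), seq(1, n, 9) keeps every 9th
+# point along the flattened lat/lon grid rather than a regular 3x3 box; it is just a cheap
+# way to reduce the PCA cost while keeping the large-scale structure. For example:
+# idx <- seq(1, n.pos.lat*n.pos.lon, 9)
+# length(idx) / (n.pos.lat*n.pos.lon)   # roughly 1/9 of the grid points survive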
+
+my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9
+
+z500cut <- z500mat[,my.seq]
+
+my.PCA <- princomp(z500cut,cor=FALSE)
+
+head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),20) # check how many PCs to keep, based on the sum of their explained variance
+
+my.cluster <- kmeans(my.PCA$scores[,1:7], 4) # 4 is the number of clusters, 7 the number of EOFs which explain ~80% of variance
+
+rm(z500euFull, z500mat)
+gc()
+
+# Load wind data:
+
+vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon))
+
+## for (y in year.start:year.end){
+##   var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat')
+##   vareu <- var$obs[,,,,pos.lat,pos.lon]
+##   vareuFull[seq.days.in.a.future.year(year.start, y),,] <- vareu
+## }
+
+for (y in year.start:year.end){
+  var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData"))
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData"))
+
+varPeriod <- vareuFull[days.period,,]
+
+varPeriodClim <- apply(varPeriod,c(2,3),mean,na.rm=T)
+varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+varPeriodAnom <- varPeriod - varPeriodClim2
+
+wr1 <- which(my.cluster$cluster==1)
+wr2 <- which(my.cluster$cluster==2)
+wr3 <- which(my.cluster$cluster==3)
+wr4 <- which(my.cluster$cluster==4)
+
+varPeriodAnom1 <- varPeriodAnom[wr1,,]
+varPeriodAnom2 <- varPeriodAnom[wr2,,]
+varPeriodAnom3 <- varPeriodAnom[wr3,,]
+varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+z500wr1 <- z500[wr1,,]
+z500wr2 <- z500[wr2,,]
+z500wr3 <- z500[wr3,,]
+z500wr4 <- z500[wr4,,]
+
+z500wr1mean <- apply(z500wr1,c(2,3),mean,na.rm=T)
+z500wr2mean <- apply(z500wr2,c(2,3),mean,na.rm=T)
+z500wr3mean <- apply(z500wr3,c(2,3),mean,na.rm=T)
+z500wr4mean <- apply(z500wr4,c(2,3),mean,na.rm=T)
+
+# Mean z500 maps:
+
+my.brks <- c(seq(48800,57100,1)) # breaks for the absolute z500 fields of each WR
+my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+
+PlotEquiMap(z500wr1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # NAO-
+PlotEquiMap(z500wr2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # blocking
+PlotEquiMap(z500wr3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # atlantic ridge
+PlotEquiMap(z500wr4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # NAO+
+
+
+# Average wind anomalies:
+
+my.brks <- c(seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1)) # breaks for the mean anomaly of a WR
+my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+
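+# Commented-out note on the wind-anomaly breaks above ('my.brks.example' is hypothetical):
+# the vector is deliberately non-uniform, with 0.1-wide bins inside [-2.9, 2.9] and 1-wide
+# bins outside, so most of the colour resolution is spent on the small anomalies:
+# my.brks.example <- c(seq(-10,-3,1), seq(-2.9,2.9,0.1), seq(3,10,1))
+# length(my.brks.example) - 1      # 74 colour bins requested from the palette
+# range(diff(my.brks.example))     # 0.1 (centre) vs 1.0 (tails)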
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"NAO-.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"Blocking.png"), width=1000, height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"Atlantic Ridge.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"NAO+.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+
+# save as .pdf instead of .png (note that pdf() sizes are in inches, not pixels):
+pdf(file=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"NAO-.pdf"),width=10,height=7)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"Blocking.pdf"), width=10, height=7)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"Atlantic Ridge.pdf"),width=10,height=7)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"NAO+.pdf"),width=10,height=7)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+rm(vareuFull, varPeriod, varPeriodAnom)
diff --git a/old/weather_regimes_v3.R~ b/old/weather_regimes_v3.R~
new file mode 100644
index 0000000000000000000000000000000000000000..41baa898f7a03c827eaf359c0fc59f27da4f7f8b
--- /dev/null
+++ b/old/weather_regimes_v3.R~
@@ -0,0 +1,187 @@
+
+library(s2dverification) # for the function Load()
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+# available reanalyses:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
'/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps
+
+year.start <- 1979
+year.end <- 2013
+
+rean <- ERAint # choose one of the two reanalyses above
+
+var.name <- "tas" #"sfcWind" # name of the 'predictand' variable of the chosen reanalysis
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because the Z500 grid uses only positive longitude values!
+
+#############################################################################################################################
+
+# load only 1 day of z500 to detect the minimum and maximum lat and lon values which identify only the North Atlantic:
+domain <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that this only considers the case where the study region crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+z500euFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat')
+  z500eu <- var$obs[,,,,pos.lat,pos.lon]
+  z500euFull[seq.days.in.a.future.year(year.start, y),,] <- z500eu
+  rm(z500eu)
+  gc()
+}
+
+lat <- var$lat[pos.lat]
+lon <- var$lon[pos.lon]
+
+period = 13 # (winter)
+
+days.period <- NA
+for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+days.period <- days.period[-1] # remove the NA at the beginning, introduced only so the command above could be executed
+n.days.period <- length(days.period)
+
+z500 <- z500euFull[days.period,,] # select only days in the chosen period (i.e: winter)
+
+z500mat <- z500
+
+dim(z500mat) <- c(head(dim(z500mat),1), n.pos.lat*172) # convert the array into a matrix (days x grid points)
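+
+# Note on the reshape above: R arrays are column-major, so each row of z500mat
+# is one day and each column one grid point (latitude varying fastest). The
+# literals 172 here and 9976 below are hard-coded for this domain (apparently
+# n.pos.lon and n.pos.lat*n.pos.lon); a generic version, as in the newer script
+# above, would be:
+#
+#   dim(z500mat) <- c(dim(z500)[1], n.pos.lat*n.pos.lon)
+#   my.seq <- seq(1, n.pos.lat*n.pos.lon, 9)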
+ +my.seq <- seq(1,9976,9) + +z500cut <- z500mat[,my.seq] + +my.PCA <- princomp(z500cut,cor=FALSE) + +head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),20) # check how many PCAs to keep basing on the sum of their explained variance + +my.cluster <- kmeans(my.PCA$scores[,1:7], 4) # 4 is the number of clusters, 7 the number of EOFs which explains in our case the 79% of variance + +rm(z500euFull,z500eu,z500mat,var) +gc() + +# Load wind data: + +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat') + vareu <- var$obs[,,,,pos.lat,pos.lon] + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- vareu +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean,"_",var.name,".RData")) +load(file=paste0(workdir,"/weather_regimes_",rean,"_",var.name,".RData")) + +varPeriod <- vareuFull[days.period,,] + +varPeriodClim <- apply(varPeriod,c(2,3),mean,na.rm=T) +varPeriodClim2 <- InsertDim(varPeriodClim,1,2708) + +varPeriodAnom <- varPeriod - varPeriodClim2 + +wr1 <- which(my.cluster$cluster==1) +wr2 <- which(my.cluster$cluster==2) +wr3 <- which(my.cluster$cluster==3) +wr4 <- which(my.cluster$cluster==4) + +varPeriodAnom1 <- varPeriodAnom[wr1,,] +varPeriodAnom2 <- varPeriodAnom[wr2,,] +varPeriodAnom3 <- varPeriodAnom[wr3,,] +varPeriodAnom4 <- varPeriodAnom[wr4,,] + +varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) +varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) +varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) +varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + +z500wr1 <- z500[wr1,,] +z500wr2 <- z500[wr2,,] +z500wr3 <- z500[wr3,,] +z500wr4 <- z500[wr4,,] + +z500wr1mean <- apply(z500wr1,c(2,3),mean,na.rm=T) +z500wr2mean <- apply(z500wr2,c(2,3),mean,na.rm=T) +z500wr3mean <- apply(z500wr3,c(2,3),mean,na.rm=T) +z500wr4mean <- apply(z500wr4,c(2,3),mean,na.rm=T) + +# Mean z500 maps: + +my.brks <- c(seq(48800,57100,1)) # % Mean anomaly of a WR +my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + +PlotEquiMap(z500wr1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # NAO- +PlotEquiMap(z500wr2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # blocking +PlotEquiMap(z500wr3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # atlantic ridge +PlotEquiMap(z500wr4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols) # NAO+ + + +# Average wind anomalies: + +my.brks <- c(seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1)) # % Mean anomaly of a WR +my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + +png(filename=paste0(mapdir,"/",rean,"_",var.name,"_",my.period[period],"NAO-.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Blocking.png"), width=1000, height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, 
brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Atlantic Ridge.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO+.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + + +# save as .pdf instead of .png: +pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO-.pdf"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + +pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Blocking.pdf"), width=1000, height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + +pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Atlantic Ridge.pdf"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + +pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO+.pdf"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +dev.off() + +rm(varPeriodeuFull,varPeriodeu,varPeriod,var) diff --git a/old/weather_regimes_v30.R b/old/weather_regimes_v30.R new file mode 100644 index 0000000000000000000000000000000000000000..763f459ada5f2a9530f12e693cf18c8837cb8300 --- /dev/null +++ b/old/weather_regimes_v30.R @@ -0,0 +1,1140 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# 
available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load the pressure fields and var from the reanalysis, 'forecast' to load them from the forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # resolution to which the psl and var data are interpolated when loading them (i.e: set to 0.75 to use the same resolution as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential grid uses only positive longitude values!
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Only for Reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# only for Seasonal forecasts:
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month [bypassed by the optional arguments of the script]
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+visualize <- 0 # if 0, it only saves the data to plot in the graphs and the frequency time series;
+               # if 1, it only visualizes the graphs, loading the saved data;
+               # if 2, it saves the data and plots the graphs at the same time.
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+if(fields.name == forecast.name) WR.period = 1
+n.years <- year.end - year.start + 1
+
+# in case the script is run with two arguments, they are assigned to the two variables below:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 2){
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+} else {
+  start.month <- startM
+  lead.month <- leadM
+  WR.period <- start.month
+}
+
+# in case the script is run with 1 argument, it is assumed you are using Reanalysis:
+if(length(script.arg) == 1) WR.period <- script.arg[1]
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning, introduced only so the command above could be executed
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # Number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data:
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
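+
+# Example of the interpolation grid label built above: with res = 0.75,
+# n.lon.grid = 360/0.75 = 480 and n.lat.grid = 180/0.75 + 1 = 241, so
+# my.grid == "r480x241", the regular-grid string passed to the 'grid'
+# argument of Load() when interpolation is enabled.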
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid matches the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that this only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded with the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+if(visualize != 1){
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can only load the data of one month at a time (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$mod #, grid=my.grid, method='bilinear')
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
  n.leadtimes <- length(leadtime)
  sdates <- weekly.seq(forecast.year,mes,day)
  n.sdates <- length(sdates)
  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # allocated only once n.sdates and n.leadtimes are known

  for (startdate in 1:n.sdates){
    for(lday in leadtime){
      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)

      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
      rm(var)
      gc()

      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
      rm(mean.lday)
      gc()
    }
  }
}

if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pa to MSLP in hPa
gc()

#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))

# compute the cluster analysis:
my.PCA <- list()
my.cluster <- list()
my.cluster.array <- list()
tot.variance <- list()
n.pcs <- c() # number of PCs retained for each period

#p=1 # for the debug
for(p in WR.period){
  # Select only the data of the month/season we want:
  if(fields.name == rean.name){
    pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)

    # input of the cluster analysis:
    psl.kmeans <- pslPeriod

    # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!);
    # grid cells shrink towards the poles as cos(lat), so multiplying by cos(lat)^0.5 makes the
    # squared distances used by kmeans() area-weighted:
    if(lat.weighting){
      lat.weighted <- cos(lat*pi/180)^0.5
      lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
      psl.kmeans <- pslPeriod * lat.weighted.array
      rm(lat.weighted.array)
      gc()
    }

    dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix
    rm(pslPeriod)
  }

  # in the S4 case there is nothing to select, because only one month of data was loaded:
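
  # The S4 branch below converts psl to daily anomalies by subtracting a
  # LOESS-smoothed lead-time climatology (span=0.35), which filters the model
  # drift along the leadtimes. A minimal sketch of the same idea on synthetic
  # data (hypothetical values; same loess()/predict() calls as in the code below):
  #
  #   day  <- 1:216
  #   clim <- 1010 + 5*sin(day/30) + rnorm(216) # noisy climatology along the leadtimes
  #   fit  <- loess(clim ~ day, data.frame(clim=clim, day=day), span=0.35)
  #   anom <- clim - predict(fit)               # drift-free daily anomalies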
+ if(fields.name == ECMWF_S4.name){ + + ## climatology plots: + + ## load eraint data: + ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + ## my.years <- year.start:year.end + ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + ## # check S4 lon and lat: + ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + ## # draw climatologies: + ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + ## s4.data <- data.frame(s4=true.climate[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + ## s4.pred <- predict(s4.loess) + ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## col.monthly <- "brown" + ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess <- loess(era ~ day, era.data, span=0.50) + ## era.pred <- predict(era.loess) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + ## era.data2 <- data.frame(era=era[1,1,1:216], 
day=1:216) + ## era.loess2 <- loess(era ~ day, era.data2, span=0.3) + ## era.pred2 <- predict(era.loess2) + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess3 <- loess(era ~ day, era.data3, span=0.35) + ## era.pred3 <- predict(era.loess3) + + ## col.monthly.erai <- "turquoise3" + ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + + + ## # anomalias: + ## point.type <- "l" + ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,] + ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3) + ## lines(rep(0,216)) + + ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d + ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred + ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4)) + ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim + ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3) + + + ## pos.last.year <- dim(var$obs)[3] + ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216] + ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216] + ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216] + ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2) + + ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216] + ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216] + + ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4)) + ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim + ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2) + + ## # Nube plot: + ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## grid(nx=10,lwd=2) + ## lines(era.anom.pred2,type="l", 
col="purple", lty=1, pch=19, lwd=2) + ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2) + ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2) + + + ## # fit alfa parameter for each grid point: + ## i=1 + ## j=1 + ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T) + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## k=0; error <- c() + ## for (alfa in seq(0.25,0.50,0.01)){ + ## k=k+1 + ## era.loess <- loess(era ~ day, era.data, span=alfa) + ## era.pred <- predict(era.loess) + ## error[k] <- mean(abs(era.pred - era[1,1,1:216])) + ## } + + # convert psl in daily anomalies with the LOESS filter: + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. 
of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + + ## # if you didn't convert psl data in anomalies until now, you can convert psl data in monthly anomalies: + ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T) + ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members) + + ## pslPeriod <- pslPeriod - pslPeriodClim2 + ## rm(pslPeriodClim, pslPeriodClim2) + ## gc() + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() +} + + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +## if(fields.name == ECMWF_S4.name) { +## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + +## my.years <- year.start:year.end +## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear') +## } + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + 
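+
+# Example of the PC-selection rule applied in the cluster analysis above: with
+# variance.explained = 0.8, n.pcs is the position of the first cumulative
+# explained variance above 80%. A minimal standalone check (hypothetical sdev
+# values, not taken from the data):
+#
+#   sdev <- c(5, 3, 2, 1, 0.5)
+#   cumvar <- cumsum(sdev^2/sum(sdev^2)) # 0.64 0.87 0.97 0.99 1.00
+#   head(which(cumvar > 0.8), 1)         # -> 2 PCs would be retained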
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    if(sequences){ # it works only for rean data!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster[[p]]$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster[[p]]$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.:
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/period.length[[p]]
+    wr2y <- wr2y/period.length[[p]]
+    wr3y <- wr3y/period.length[[p]]
+    wr4y <- wr4y/period.length[[p]]
+
+    pslPeriod <- psleuFull[days.period[[p]],,]
+    pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+    pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+    rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+    gc()
+
+    pslwr1 <- pslPeriodAnom[wr1,,]
+    pslwr2 <- pslPeriodAnom[wr2,,]
+    pslwr3 <- pslPeriodAnom[wr3,,]
+    pslwr4 <- pslPeriodAnom[wr4,,]
+
+    rm(pslPeriodAnom)
+    gc()
+
+    pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+    pslwr2mean <- 
apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- 
wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, psl, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, p, WR.period, lon, lat, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name) {
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    days.period[[p]] <- integer(0)
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+} # close the for loop on 'p'
+
+} # close if on 'visualize'
+
+if(visualize != 0){
+
+# run it twice: the first time with ordering <- FALSE, to look only at the cartography and visually find the right regime names of the four clusters;
+# the second time, to save the ordered cartography:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+as.pdf <- F # FALSE: save results as one .png file for each season; TRUE: save only 1 .pdf file with all seasons
+save.names <- T # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+
+# positions of the longitude values of Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+  # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+
+  my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+library(RColorBrewer) # for brewer.pal(), used just below but not loaded at the top of the script
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the 
impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in WR.period){ + if(fields.name==rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + if(fields.name==forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + 
cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + if(fields.name==rean.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, 
year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 
0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + +} # close if on 'visualize' diff --git a/old/weather_regimes_v30.R~ b/old/weather_regimes_v30.R~ new file mode 100644 index 0000000000000000000000000000000000000000..5cb34d79354da9772011cc80990b6bbd4a283fad --- /dev/null +++ b/old/weather_regimes_v30.R~ @@ -0,0 +1,1140 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- 
'/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "MSLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+res <- 0.75 # set the resolution to which to interpolate the psl and var data when loading them (e.g. set to 0.75 to use the same resolution as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Only for Reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# only for Seasonal forecasts:
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script]
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+visualize <- 0 # if 0, it only saves the data to plot in the graphs and the frequency time series.
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two below variables: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} else { + start.month <- startM + lead.month <- leadM + WR.period <- start.month +} + +# in case the script is run with 1 argument, it is assumed you are using Reanalysis: +if(length(script.arg) == 1) WR.period <- script.arg[1] + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +} + +# Create a regular lat/lon grid to interpolate the data: +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0! 
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+if(visualize != 1){
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
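+# (illustrative check, not run: in the reanalysis case psleuFull is a plain days x lat x lon array,
+#  with days ordered year1day1, year1day2, ..., which is the order assumed by the regime-frequency
+#  computations further down)
+## dim(psleuFull)        # c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)
+## sum(is.na(psleuFull)) # should be 0 if the loop above filled every day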
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for one month at a time (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pascal to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c() # (my.cluster is already initialized as a list above, so it must not be overwritten here)
+
+#p=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+    pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g. winter)
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+      pslPeriod <- pslPeriod * lat.weighted.array # overwrite pslPeriod, so the weighted fields are what enters the cluster analysis below
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # select period data from psleuFull to use it as input of the cluster analysis:
+    psl.kmeans <- pslPeriod
+    dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  # in the S4 case we don't need to select anything because we already loaded only 1 month of data!
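+  # (illustrative sketch, not run: the S4 branch below turns psl into daily anomalies by subtracting a
+  #  LOESS-smoothed daily ensemble-mean climatology; 'span' is the fraction of lead days entering each
+  #  local regression, 0.35 being the value chosen further down; a toy example with fake data:)
+  ## day <- 1:216
+  ## clim <- 10 + 5*sin(2*pi*day/365) + rnorm(216, sd=0.5)  # hypothetical noisy daily climatology
+  ## clim.smooth <- predict(loess(clim ~ day, data.frame(clim, day), span=0.35))
+  ## plot(day, clim, cex=.3); lines(day, clim.smooth, col="red", lwd=2)  # raw vs smoothed seasonal cycle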
+ if(fields.name == ECMWF_S4.name){ + + ## climatology plots: + + ## load eraint data: + ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + ## my.years <- year.start:year.end + ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + ## # check S4 lon and lat: + ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + ## # draw climatologies: + ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + ## s4.data <- data.frame(s4=true.climate[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + ## s4.pred <- predict(s4.loess) + ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## col.monthly <- "brown" + ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess <- loess(era ~ day, era.data, span=0.50) + ## era.pred <- predict(era.loess) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + ## era.data2 <- data.frame(era=era[1,1,1:216], 
day=1:216) + ## era.loess2 <- loess(era ~ day, era.data2, span=0.3) + ## era.pred2 <- predict(era.loess2) + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess3 <- loess(era ~ day, era.data3, span=0.35) + ## era.pred3 <- predict(era.loess3) + + ## col.monthly.erai <- "turquoise3" + ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + + + ## # anomalias: + ## point.type <- "l" + ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,] + ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3) + ## lines(rep(0,216)) + + ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d + ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred + ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4)) + ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim + ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3) + + + ## pos.last.year <- dim(var$obs)[3] + ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216] + ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216] + ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216] + ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2) + + ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216] + ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216] + + ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4)) + ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim + ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2) + + ## # Nube plot: + ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## grid(nx=10,lwd=2) + ## lines(era.anom.pred2,type="l", 
col="purple", lty=1, pch=19, lwd=2)
+  ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2)
+  ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2)
+
+  ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2)
+  ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2)
+  ## lines(rep(0,216))
+
+  ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2)
+  ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2)
+
+
+  ## # fit the alfa parameter for each grid point:
+  ## i=1
+  ## j=1
+  ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T)
+  ## era.data <- data.frame(era=era[1,1,1:216], day=1:216)
+  ## k=0; error <- c()
+  ## for (alfa in seq(0.25,0.50,0.01)){
+  ##   k=k+1
+  ##   era.loess <- loess(era ~ day, era.data, span=alfa)
+  ##   era.pred <- predict(era.loess)
+  ##   error[k] <- mean(abs(era.pred - era[1,1,1:216]))
+  ## }
+
+  # convert psl into daily anomalies with the LOESS filter:
+  pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClimLoess <- pslPeriodClim
+  for(i in 1:n.pos.lat){  # (the loops must run over ALL grid points, i.e. 1:n, not only the last one)
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+  rm(pslPeriodClim)
+  gc()
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+  ## # convert psl into daily anomalies computed with the 10-day leadtime window including the 9 days AFTER each day:
+  ## n.leadtimes <- dim(psleuFull)[4]
+  ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon))
+  ## for(year in 1:n.years){
+  ##   for(lead in 1:(n.leadtimes-9)){
+  ##     pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+  ##   }
+  ## }
+
+  ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+  ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members)
+
+  ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2
+
+  ## psleuFull <- psleuFull10
+  ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean)
+  ## gc()
+
+  chosen.month <- start.month + lead.month # find which month we want to select data from
+  if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+  for(y in year.start:year.end){
+    leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+    leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+    if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same number of elements for all years
+    int.leadtimes <- leadtime.min:leadtime.max
+    n.leadtimes <- leadtime.max - leadtime.min + 1
+
+    if(y == year.start) {
+      pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+    } else {
+      pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3))
+    }
+  }
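+  # (worked example, not run: with start.month = 1 and lead.month = 2, chosen.month is 3, so in a
+  #  non-leap year leadtime.min = 1 + (31+28+31) - 31 = 60 and leadtime.max = 60 + 31 - 1 = 90, i.e.
+  #  the lead days covering March; this assumes n.days.in.a.monthly.period() counts the days from the
+  #  1st of start.month up to the last day of chosen.month)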
+
+  ## # if you didn't convert psl data into anomalies until now, you can convert them into monthly anomalies:
+  ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T)
+  ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members)
+
+  ## pslPeriod <- pslPeriod - pslPeriodClim2
+  ## rm(pslPeriodClim, pslPeriodClim2)
+  ## gc()
+
+  # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+  psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+  psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+
+  #rm(pslPeriod)
+  gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- c() # initialize the element, so it can be grown inside the loop below
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+
+  # compute the PCs or not:
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+    if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30)
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE)
+      my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+
+      #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+}
+
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+## if(fields.name == ECMWF_S4.name) {
+##   if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+##   my.years <- year.start:year.end
+##   vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+## }
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
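+# (worked example, not run: the yearly regime frequencies computed below slice cluster.sequence one
+#  year at a time; in the reanalysis case the days of period p are stored consecutively year by year,
+#  so for p = 1 (January, period.length[[p]] = 31) the days of the 3rd year sit at positions
+#  (3-1)*31 + (1:31) = 63:93)
+## y <- year.start + 2
+## cluster.sequence[(y-year.start)*period.length[[p]] + (1:period.length[[p]])]  # the 31 January days of year y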
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster[[p]]$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster[[p]]$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/period.length[[p]]
+    wr2y <- wr2y/period.length[[p]]
+    wr3y <- wr3y/period.length[[p]]
+    wr4y <- wr4y/period.length[[p]]
+
+    pslPeriod <- psleuFull[days.period[[p]],,]
+    pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+    pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+    rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+    gc()
+
+    pslwr1 <- pslPeriodAnom[wr1,,]
+    pslwr2 <- pslPeriodAnom[wr2,,]
+    pslwr3 <- pslPeriodAnom[wr3,,]
+    pslwr4 <- pslPeriodAnom[wr4,,]
+
+    rm(pslPeriodAnom)
+    gc()
+
+    pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+    pslwr2mean <- 
apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- 
wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, psl, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, p, WR.period, lon, lat, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    days.period[[p]] <- c() # initialize the element, so it can be grown inside the loop below
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){ # (loop over the years, needed to fill the frequency vectors below)
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+} # close the for loop on 'p'
+
+} # close if on 'visualize'
+
+if(visualize != 0){
+
+# run it twice: the first time with ordering <- FALSE, to look at the cartography and visually identify the right regime names of the four clusters;
+# the second time, to save the ordered cartography:
+ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names).
+as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons
+save.names <- T # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file
+
+# position of the longitude values of Europe only (without the Atlantic Ocean and America):
+#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only
+EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+regime1.name <- orden[1]
+regime2.name <- orden[2]
+regime3.name <- orden[3]
+regime4.name <- orden[4]
+
+if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()}
+
+# breaks and colors of the geopotential fields:
+if(psl == "g500"){
+  #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR
+  my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+  my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+  #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+  values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+}
+
+if(psl == "psl"){
+  my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+  # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+
+  my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+  values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+}
+
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+# breaks and colors of the 
impact maps (valid both for Wind speed anomalies and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in WR.period){ + if(fields.name==rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + if(fields.name==forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + 
cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + if(fields.name==rean.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, 
year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 
0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + +} # close if on 'visualize' diff --git a/old/weather_regimes_v31.R b/old/weather_regimes_v31.R new file mode 100644 index 0000000000000000000000000000000000000000..a1c5f73d5d4bf23b491a6bcd739648c676313d7e --- /dev/null +++ b/old/weather_regimes_v31.R @@ -0,0 +1,1135 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- 
'/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +res <- 0.75 # set the resolution to which the psl and var data are interpolated when loading them (e.g., set to 0.75 to use the same res as ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude! + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Only for Reanalysis: +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# only for Seasonal forecasts: +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 1 # select only the data of one lead month: [bypassed by the optional arguments of the script] + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast times (in days) at which to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
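+# Editor's usage note (a hedged illustration; it assumes this script is saved as
+# weather_regimes_v31.R): the argument-parsing block further below accepts up to two
+# optional arguments; with two arguments the first one is the start month and the
+# second one the lead month (forecast case), while a single argument is the period
+# to classify (reanalysis case), e.g.:
+## Rscript weather_regimes_v31.R 11 2   # S4: start month 11 (November), lead month 2
+## Rscript weather_regimes_v31.R 13     # reanalysis: classify period 13 (winter)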
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two variables below: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} else { + start.month <- startM + lead.month <- leadM + WR.period <- start.month +} + +# in case the script is run with one argument, it is assumed you are using reanalysis data: +if(length(script.arg) == 1) WR.period <- script.arg[1] + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the leading NA that was introduced only so that the concatenation above works + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years +} + +# Create a regular lat/lon grid to interpolate the data: +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0!
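+
+# Editor's note: for example, with res = 0.75 the lines above give n.lon.grid = 480 and
+# n.lat.grid = 241, so my.grid becomes "r480x241", the regular-grid string accepted by
+# Load() for a 0.75-degree lat/lon grid; a quick sanity check of the string that is built:
+## res <- 0.75
+## paste0('r', 360/res, 'x', (180/res)+1)   # returns "r480x241"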
+ +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged): +if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1) + +#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only one month at a time (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$mod #, grid=my.grid, method='bilinear') + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case (the startdates and leadtimes must be defined before allocating psleuFull): + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- c() + +#p=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g., winter) + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + pslPeriod <- pslPeriod * lat.weighted.array # weight the fields in place, so the weighting is kept by the selection below + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod) + } + + # in the S4 case we don't need to select anything because we already loaded only one month of data!
+ if(fields.name == ECMWF_S4.name){ + + ## climatology plots: + + ## load eraint data: + ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + ## my.years <- year.start:year.end + ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + ## # check S4 lon and lat: + ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + ## # draw climatologies: + ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + ## s4.data <- data.frame(s4=true.climate[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + ## s4.pred <- predict(s4.loess) + ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## col.monthly <- "brown" + ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess <- loess(era ~ day, era.data, span=0.50) + ## era.pred <- predict(era.loess) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + ## era.data2 <- data.frame(era=era[1,1,1:216], 
day=1:216) + ## era.loess2 <- loess(era ~ day, era.data2, span=0.3) + ## era.pred2 <- predict(era.loess2) + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess3 <- loess(era ~ day, era.data3, span=0.35) + ## era.pred3 <- predict(era.loess3) + + ## col.monthly.erai <- "turquoise3" + ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + + + ## # anomalies: + ## point.type <- "l" + ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,] + ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3) + ## lines(rep(0,216)) + + ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d + ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred + ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4)) + ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim + ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3) + + + ## pos.last.year <- dim(var$obs)[3] + ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216] + ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216] + ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216] + ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2) + + ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216] + ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216] + + ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4)) + ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim + ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2) + + ## # cloud plot (all the filters overlaid): + ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## grid(nx=10,lwd=2) + ## lines(era.anom.pred2,type="l", 
col="purple", lty=1, pch=19, lwd=2) + ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2) + ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2) + + + ## # fit alfa parameter for each grid point: + ## i=1 + ## j=1 + ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T) + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## k=0; error <- c() + ## for (alfa in seq(0.25,0.50,0.01)){ + ## k=k+1 + ## era.loess <- loess(era ~ day, era.data, span=alfa) + ## era.pred <- predict(era.loess) + ## error[k] <- mean(abs(era.pred - era[1,1,1:216])) + ## } + + # convert psl in daily anomalies with the LOESS filter: + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. 
of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + + ## # if you didn't convert the psl data into anomalies until now, you can convert it into monthly anomalies: + ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T) + ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members) + + ## pslPeriod <- pslPeriod - pslPeriodClim2 + ## rm(pslPeriodClim, pslPeriodClim2) + ## gc() + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl. 
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the PCs that together explain at least the chosen fraction of variance (80% by default) + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() +} + + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +## if(fields.name == ECMWF_S4.name) { +## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + +## my.years <- year.start:year.end +## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear') +## } + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=var.data)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + 
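+# Editor's sketch (an illustration only, mirroring the PCA branch above): compute the
+# PCs of a days-by-gridpoints matrix, keep the smallest set of leading PCs explaining
+# at least 80% of the variance, then run k-means with 4 centers on the retained scores:
+## demo <- matrix(rnorm(300*50), nrow=300)        # 300 fake days, 50 fake grid points
+## demo.pca <- princomp(demo, cor=FALSE)
+## demo.cumvar <- cumsum(demo.pca$sdev^2 / sum(demo.pca$sdev^2))
+## demo.npcs <- head(which(demo.cumvar > 0.8), 1) # first PC count exceeding 80%
+## demo.km <- kmeans(demo.pca$scores[, 1:demo.npcs], centers=4, iter.max=100, nstart=30)
+## table(demo.km$cluster)                         # sizes of the 4 clusters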
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for rean data! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 5 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- 
apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also compute the impact maps: + + # similar selection as above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- 
wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, psl, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, p, WR.period, lon, lat, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.: + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period + + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + +} # close the for loop on 'p' + + + + + +# run it twice: once with ordering <- FALSE, to look only at the cartography and visually find the right regime names of the four clusters; +# and a second time, to save the ordered cartography: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- T # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file + +# positions of the lon values of Europe only (without the Atlantic Ocean and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies 
and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in WR.period){ + if(fields.name==rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + if(fields.name==forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + 
cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + if(fields.name==rean.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
Source: ",fields.name), cex=6, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Title Centroid Maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Impact maps: + if(fields.name == rean.name){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5) + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5) + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + freq.max=60 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + barplot.freq(100*fre2, year.start, 
year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE) + mtext("%", cex=3.3) + + # Title 1: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4) + + # Title 2: + if(fields.name == rean.name){ + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4) + } + + # Title 3: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE) + mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE) + mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE) + mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE) + mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4) + + # Legend 1: + legend1.xpos <- 0.01 + legend1.width <- 0.44 + legend1.cex <- 1.8 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 
0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)} + + # Legend 2: + if(fields.name == rean.name){ + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + } + + if(!as.pdf) dev.off() # for saving 4 png + +} # close for loop on 'p' + +if(as.pdf) dev.off() # for saving a single pdf with all seasons + + diff --git a/old/weather_regimes_v31.R~ b/old/weather_regimes_v31.R~ new file mode 100644 index 0000000000000000000000000000000000000000..22640ea03d3b1cb4a5fbf761c3bc1a5043cfe98f --- /dev/null +++ b/old/weather_regimes_v31.R~ @@ -0,0 +1,1139 @@ +library(s2dverification) # for the function Load() +library(abind) +library(Kendall) +library(reshape2) +library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" + +ECMWF_monthly <- 
'/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "MSLP" # "Z500" # the name to show in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +res <- 0.75 # set the resolution you want to interpolate the psl and var data to when loading it (i.e: set to 0.75 to use the same res as ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%) + +lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude! + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Only for Reanalysis: +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# only for Seasonal forecasts: +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 1 # select only the data of one lead month: [bypassed by the optional arguments of the script] + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +visualize <- 0 # if 0, it only saves the data to plot in the graphs and the frequency time series. + # if 1, it only visualizes the graphs, loading the saved data + # if 2, it saves the data and plots the graphs at the same time. 
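+# A quick illustration of the lat.weighting option above (a standalone sketch with toy,
+# illustrative names, not part of the script): grid cells shrink towards the poles, so
+# the anomalies are damped by sqrt(cos(lat)) before the k-means step, which makes the
+# Euclidean distance behave approximately like an equal-area norm:
+#
+# toy.lat <- seq(27, 81, 3)                               # latitudes of the 19 columns
+# toy.anom <- matrix(rnorm(10*length(toy.lat)), nrow=10)  # toy anomalies: 10 days x 19 latitudes
+# w <- sqrt(cos(toy.lat*pi/180))                          # same weights the script applies later
+# toy.weighted <- sweep(toy.anom, 2, w, "*")              # damp the high-latitude columns
+# kmeans(toy.weighted, centers=4, iter.max=100, nstart=30)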
+ +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two variables below: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} else { + start.month <- startM + lead.month <- leadM + WR.period <- start.month +} + +# in case the script is run with 1 argument, it is assumed you are using Reanalysis: +if(length(script.arg) == 1) WR.period <- script.arg[1] + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning, introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years +} + +# Create a regular lat/lon grid to interpolate the data: +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0! 
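+# For example, with the default res=0.75 the grid string resolves to 'r480x241'
+# (360/0.75 = 480 longitudes and 180/0.75 + 1 = 241 latitudes), the usual
+# ERA-Interim-like regular grid:
+#
+# res <- 0.75
+# paste0('r', 360/res, 'x', (180/res)+1)   # returns "r480x241"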
+ +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged): +if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat') #, grid=my.grid, method='bilinear', nprocs=1) + +#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that this only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data: +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only 1 month at a time (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$mod #, grid=my.grid, method='bilinear') + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case (the startdates and leadtimes must be known before allocating the array): + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + psleuFull <-array(NA, 
c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) # average the four 6-hourly timesteps of each lead day into one daily mean + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- c() + +#p=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + pslPeriod <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod) + } + + # in the case of S4 we don't need to select anything because we already loaded only 1 month of data! 
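+ # (As an aside: the S4 branch below ends by flattening the member/year/leadtime
+ # dimensions into the rows of the k-means input matrix with melt() + acast() from
+ # reshape2. A toy sketch of that reshaping, with illustrative sizes only:
+ #
+ # library(reshape2)
+ # toy <- array(rnorm(2*3*4*5*6), c(2,3,4,5,6))  # member, year, day, lat, lon
+ # toy.melted <- melt(toy, varnames=c("Member","StartYear","LeadDay","Lat","Lon"))
+ # toy.kmeans <- unname(acast(toy.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+ # dim(toy.kmeans)  # 24 rows (2*3*4 daily fields) and 30 columns (5*6 grid points)
+ # )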
+ if(fields.name == ECMWF_S4.name){ + + ## climatology plots: + + ## load eraint data: + ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + ## my.years <- year.start:year.end + ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + ## # check S4 lon and lat: + ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + ## # draw climatologies: + ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + ## s4.data <- data.frame(s4=true.climate[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + ## s4.pred <- predict(s4.loess) + ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## col.monthly <- "brown" + ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess <- loess(era ~ day, era.data, span=0.50) + ## era.pred <- predict(era.loess) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + ## era.data2 <- data.frame(era=era[1,1,1:216], 
day=1:216) + ## era.loess2 <- loess(era ~ day, era.data2, span=0.3) + ## era.pred2 <- predict(era.loess2) + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess3 <- loess(era ~ day, era.data3, span=0.35) + ## era.pred3 <- predict(era.loess3) + + ## col.monthly.erai <- "turquoise3" + ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T) + ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2) + + + ## # anomalias: + ## point.type <- "l" + ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,] + ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3) + ## lines(rep(0,216)) + + ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d + ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred + ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3) + + ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4)) + ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim + ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3) + + + ## pos.last.year <- dim(var$obs)[3] + ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216] + ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216] + ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216] + ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2) + + ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216] + ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216] + + ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4)) + ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim + ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2) + + ## # Nube plot: + ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## grid(nx=10,lwd=2) + ## lines(era.anom.pred2,type="l", 
col="purple", lty=1, pch=19, lwd=2) + ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2) + ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2) + + + ## # fit alfa parameter for each grid point: + ## i=1 + ## j=1 + ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T) + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## k=0; error <- c() + ## for (alfa in seq(0.25,0.50,0.01)){ + ## k=k+1 + ## era.loess <- loess(era ~ day, era.data, span=alfa) + ## era.pred <- predict(era.loess) + ## error[k] <- mean(abs(era.pred - era[1,1,1:216])) + ## } + + # convert psl in daily anomalies with the LOESS filter: + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. 
of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + + ## # if you didn't convert psl data in anomalies until now, you can convert psl data in monthly anomalies: + ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T) + ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members) + + ## pslPeriod <- pslPeriod - pslPeriodClim2 + ## rm(pslPeriodClim, pslPeriodClim2) + ## gc() + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() +} + + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +## if(fields.name == ECMWF_S4.name) { +## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + +## my.years <- year.start:year.end +## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear') +## } + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + 
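+# (The 'sequences' filter inside the loop below removes the days that do not belong to
+# runs of at least 5 consecutive days of the same regime, spelling out every boundary
+# check explicitly. A compact equivalent based on rle(), shown here only as a sketch,
+# would differ only in how the first and last days of the period are treated:
+#
+# runs <- rle(cluster.sequence)                  # lengths and values of the consecutive runs
+# keep <- rep(runs$lengths >= 5, runs$lengths)   # TRUE for days inside runs of 5+ days
+# cluster.sequence <- cluster.sequence * keep    # days outside such runs become 0
+# )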
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for reanalysis data! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- 
apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,p,WR.period,lon,lat,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- 
wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, psl, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, p, WR.period, lon, lat, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.: + if(fields.name == ECMWF_monthly.name) { + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + +} # close the for loop on 'p' + + + + + +# run it twice: first with ordering <- FALSE, to look only at the cartography and identify visually the right regime name of each of the four clusters; +# then a second time, to save the ordered cartography: +ordering <- T # If TRUE, order the clusters following the regimes listed in the 'orden' vector (after you manually insert the names). +as.pdf <- F # FALSE: save results as one .png file for each season, TRUE: save only 1 .pdf file with all seasons +save.names <- T # if TRUE, also save the cluster names (i.e: the association between clusters and weather regimes); if FALSE, the cluster names are loaded from the file + +# position of the longitude values of Europe only (without the Atlantic Ocean and America): +#if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental Europe only +EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +if(save.names){cluster1.name.period <- cluster2.name.period <- cluster3.name.period <- cluster4.name.period <- c()} + +# breaks and colors of the geopotential fields: +if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels +} + +if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) +} + +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + +# breaks and colors of the impact maps (valid both for Wind speed anomalies 
and Temperature anomalies): +#my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR +#my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR +if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) +} +if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) +} + +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors +#my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + +if(as.pdf && fields.name==rean.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".pdf"),width=40, height=60)) +if(as.pdf && fields.name==forecast.name)(pdf(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".pdf"),width=40,height=60)) + +for(p in WR.period){ + if(fields.name==rean.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + if(fields.name==forecast.name) load(file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + if(save.names){ + if(p == 1){ # January + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 2){ # February + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 3){ # March + cluster3.name="NAO+" + cluster2.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 4){ # April + cluster2.name="NAO+" + cluster1.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 5){ # May + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 6){ # June + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 7){ # July + cluster4.name="NAO+" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 8){ # August + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster3.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 9){ # September + cluster4.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 10){ # October + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 11){ # November + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster1.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 12){ # December + cluster2.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster3.name="Atl.Ridge" + } + if(p == 13){ # Winter + cluster3.name="NAO+" + cluster4.name="NAO-" + cluster1.name="Blocking" + cluster2.name="Atl.Ridge" + } + if(p == 14){ # Spring + cluster1.name="NAO+" + cluster3.name="NAO-" + cluster2.name="Blocking" + cluster4.name="Atl.Ridge" + } + if(p == 15){ # Summer + cluster2.name="NAO+" + cluster3.name="NAO-" + cluster4.name="Blocking" + cluster1.name="Atl.Ridge" + } + if(p == 16){ # Autumn + cluster4.name="NAO+" + cluster1.name="NAO-" + cluster2.name="Blocking" + 
cluster3.name="Atl.Ridge" + } + + cluster1.name.period[p] <- cluster1.name + cluster2.name.period[p] <- cluster2.name + cluster3.name.period[p] <- cluster3.name + cluster4.name.period[p] <- cluster4.name + + if(fields.name==rean.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) save(cluster1.name.period, cluster2.name.period, cluster3.name.period, cluster4.name.period, file=paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + } else { # in this case, we load the cluster names from the file already saved: + if(fields.name==rean.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_ClusterNames.RData")) + if(fields.name==forecast.name) load(paste0(workdir,"/",fields.name,"_", var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_ClusterNames.RData")) + + cluster1.name <- cluster1.name.period[p] + cluster2.name <- cluster2.name.period[p] + cluster3.name <- cluster3.name.period[p] + cluster4.name <- cluster4.name.period[p] + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + if(fields.name == rean.name){ + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(!as.pdf && fields.name==rean.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],".png"),width=3000,height=3700) + if(!as.pdf && fields.name==forecast.name) png(filename=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0("Weather Regimes for ",my.period[p]," season (",year.start,"-",year.end,"). 
+
+  # Centroid maps:
+  map.xpos <- 0
+  map.width <- 0.46
+  par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE)
+  PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE)
+  PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE)
+  PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+  par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE)
+  PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+
+  # 'year' labels under the frequency plots:
+  map.title.xpos <- 0.76
+  map.title.width <- 0.2
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+  mtext("year", cex=3)
+  par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+  mtext("year", cex=3)
+
+  # Impact maps:
+  if(fields.name == rean.name){
+    impact.xpos <- 0.47
+    impact.width <- 0.26
+    par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+    PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+    par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+    PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+    par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE)
+    PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+    par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE)
+    PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+  }
+
+  # Frequency plots:
+  barplot.xpos <- 0.75
+  barplot.width <- 0.25
+  freq.max=60
+
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE)
+  barplot.freq(100*fre1, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.510, 0.680), new=TRUE)
+  barplot.freq(100*fre2, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.275, 0.445), new=TRUE)
+  barplot.freq(100*fre3, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+  par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.040, 0.210), new=TRUE)
+  barplot.freq(100*fre4, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0))
+
+  # % on Frequency plots:
+  symbol.xpos <- 0.735
+  symbol.width <- 0.01
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.83, 0.84), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.60, 0.61), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.36, 0.37), new=TRUE)
+  mtext("%", cex=3.3)
+  par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.13, 0.14), new=TRUE)
+  mtext("%", cex=3.3)
+
+  # Title 1:
+  title1.xpos <- 0.02
+  title1.width <- 0.4
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+  par(fig=c(title1.xpos, title1.xpos + title1.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name,": ", psl.name, " Anomaly"), font=2, cex=4)
+
+  # Title 2:
+  if(fields.name == rean.name){
+    title2.xpos <- 0.42
+    title2.width <- 0.35
+    par(fig=c(title2.xpos, title2.xpos + title2.width, 0.930, 0.935), new=TRUE)
+    mtext(paste0(regime1.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+    par(fig=c(title2.xpos, title2.xpos + title2.width, 0.695, 0.700), new=TRUE)
+    mtext(paste0(regime2.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+    par(fig=c(title2.xpos, title2.xpos + title2.width, 0.460, 0.465), new=TRUE)
+    mtext(paste0(regime3.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+    par(fig=c(title2.xpos, title2.xpos + title2.width, 0.225, 0.230), new=TRUE)
+    mtext(paste0(regime4.name, ": Impact on ", var.name.full[var.num]), font=2, cex=4)
+  }
+
+  # Title 3:
+  title3.xpos <- 0.75
+  title3.width <- 0.25
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.930, 0.935), new=TRUE)
+  mtext(paste0(regime1.name, ": Frequency (", round(100*mean(fre1),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.695, 0.700), new=TRUE)
+  mtext(paste0(regime2.name, ": Frequency (", round(100*mean(fre2),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.460, 0.465), new=TRUE)
+  mtext(paste0(regime3.name, ": Frequency (", round(100*mean(fre3),1), "%)"), font=2, cex=4)
+  par(fig=c(title3.xpos, title3.xpos + title3.width, 0.225, 0.230), new=TRUE)
+  mtext(paste0(regime4.name, ": Frequency (", round(100*mean(fre4),1), "%)"), font=2, cex=4)
+
+  # Legend 1:
+  legend1.xpos <- 0.01
+  legend1.width <- 0.44
+  legend1.cex <- 1.8
+  my.subset <- match(values.to.plot, my.brks)
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+  par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+  ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+  if(psl=="g500") {mtext(side=4," m", cex=2.5)} else {mtext(side=4," hPa", cex=legend1.cex)}
+
+  # Legend 2:
+  if(fields.name == rean.name){
+    legend2.xpos <- 0.48
+    legend2.width <- 0.25
+    legend2.cex <- 1.8
+    my.subset2 <- match(values.to.plot2, my.brks.var)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+    par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE)
+    ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+    mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex)
+  }
+
+  if(!as.pdf) dev.off() # for saving the 4 PNGs
+
+} # close for loop on 'p'
+
+if(as.pdf) dev.off() # for saving a single pdf with all seasons
+
+
diff --git a/old/weather_regimes_v32.R b/old/weather_regimes_v32.R
new file mode 100644
index 0000000000000000000000000000000000000000..40daba7dbce12c240d83fd85edd774e75dc8fae0
--- /dev/null
+++ b/old/weather_regimes_v32.R
@@ -0,0 +1,776 @@
+
+# Creation: 28/6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output is needed by the script weather_regimes_maps.R
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+library(reshape2)
+library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- rean #forecast # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+res <- 0.75 # set the resolution to interpolate the psl and var data to when loading them (i.e: set to 0.75 to use the same res as ERA-Interim)
+            # interpolation method is BILINEAR.
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Only for Reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 1 # select only the data of one lead month: [bypassed by the optional arguments of the script]
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+if(fields.name == forecast.name) WR.period = 1
+n.years <- year.end - year.start + 1
+
+# in case the script is run with two arguments, they are assigned to the two variables below:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 2){
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+} else {
+  start.month <- startM
+  lead.month <- leadM
+  WR.period <- start.month
+}
+
+# in case the script is run with 1 argument, it is assumed you are using Reanalysis:
+if(length(script.arg) == 1) WR.period <- script.arg[1]
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # Number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically that of ERA-Interim), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
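+
+# A worked example of the wrap-around above (a sketch, assuming the default 0.75-degree
+# grid, so that lon = 0, 0.75, ..., 359.25; all names here are illustrative only):
+## lon.demo <- seq(0, 359.25, by=0.75)          # 480 longitudes, none negative
+## i.max <- tail(which(45 - lon.demo >= 0), 1)  # index 61  -> lon = 45    (lon.max)
+## i.min <- which(lon.demo - 274.5 >= 0)[1]     # index 367 -> lon = 274.5 (lon.min)
+## i.box <- c(1:i.max, i.min:length(lon.demo))  # 0..45E joined with 274.5E..359.25E
+## length(i.box)                                # 175 longitudes crossing Greenwich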
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+  psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only one month at a time (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$mod #, grid=my.grid, method='bilinear')
+
+  #Load(var = "tas", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates='19820101', dimnames=list(member="ensemble"), nmember=1, leadtimemax=1, storefreq='daily', output = 'lonlat', grid="r480x241", lonmin=20)
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
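+  # The row order of psleuFull above is startdate-major: all the years of the first
+  # startdate come first, then all the years of the second one, and so on. A sketch of
+  # the corresponding index arithmetic ('row.of' is a hypothetical helper, unused elsewhere):
+  ## row.of <- function(startdate, y) (startdate-1)*n.years + (y - year.start + 1)
+  ## row.of(1, year.start)  # 1 -> first startdate, first year
+  ## row.of(2, year.start)  # n.years+1 -> second startdate, first year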
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pascal to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for debugging
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+    pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+
+    # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+      psl.kmeans <- pslPeriod * lat.weighted.array
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # select period data from psleuFull to use as input for the cluster analysis
+    # (if the latitude weighting was applied, psl.kmeans already holds the weighted fields):
+    if(!lat.weighting) psl.kmeans <- pslPeriod
+    dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    rm(pslPeriod)
+  }
+
+  # in the case of S4, we don't need to select anything, because we already loaded only one month of data!
+  if(fields.name == ECMWF_S4.name){
+
+    ## climatology plots:
+
+    ## load eraint data:
+    ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+    ## my.years <- year.start:year.end
+    ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl
+
+    ## # check S4 lon and lat:
+    ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear')
+
+    ## # draw climatologies:
+    ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T)
+    ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5)
+    ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime
+
+    ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5))
+
+    ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T)
+    ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2)
+
+    ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2)
+    ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2)
+
+    ## s4.data <- data.frame(s4=true.climate[1,], day=1:216)
+    ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50)
+    ## s4.pred <- predict(s4.loess)
+    ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2)
+
+    ## col.monthly <- "brown"
+    ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T)
+    ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2)
+    ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T)
+    ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2)
+    ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T)
+    ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2)
+    ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T)
+    ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2)
+    ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T)
+    ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2)
+    ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T)
+    ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2)
+    ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T)
+    ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2)
+
+
+    ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T)
+    ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2)
+
+    ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2)
+    ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2)
+
+    ## era.data <- data.frame(era=era[1,1,1:216], day=1:216)
+    ## era.loess <- loess(era ~ day, era.data, span=0.50)
+    ## era.pred <- predict(era.loess)
+    ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2)
+
+    ## era.data2 <- data.frame(era=era[1,1,1:216], day=1:216)
+    ## era.loess2 <- loess(era ~ day, era.data2, span=0.3)
+    ## era.pred2 <- predict(era.loess2)
+    ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2)
+
+    ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216)
+    ## era.loess3 <- loess(era ~ day, era.data3, span=0.35)
+    ## era.pred3 <- predict(era.loess3)
+
+    ## col.monthly.erai <- "turquoise3"
+    ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T)
+    ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+    ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T)
+    ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+    ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T)
+    ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+    ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T)
+    ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+    ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T)
+    ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+    ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T)
+    ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+    ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T)
+    ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+
+
+    ## # anomalies:
+    ## point.type <- "l"
+    ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,]
+    ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3)
+    ## lines(rep(0,216))
+
+    ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d
+    ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3)
+
+    ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred
+    ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3)
+
+    ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4))
+    ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim
+    ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3)
+
+
+    ## pos.last.year <- dim(var$obs)[3]
+    ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216]
+    ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2)
+    ## lines(rep(0,216))
+
+    ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216]
+    ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2)
+
+    ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216]
+    ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2)
+
+    ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216]
+    ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216]
+
+    ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4))
+    ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim
+    ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2)
+
+    ## # cloud plot:
+    ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2)
+    ## grid(nx=10,lwd=2)
+    ## lines(era.anom.pred2,type="l", col="purple", lty=1, pch=19, lwd=2)
+    ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2)
+    ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2)
+
+    ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2)
+    ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2)
+    ## lines(rep(0,216))
+
+    ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2)
+    ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2)
+
+    ## # fit the alfa parameter for each grid point:
+    ## i=1
+    ## j=1
+    ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T)
+    ## era.data <- data.frame(era=era[1,1,1:216], day=1:216)
+    ## k=0; error <- c()
+    ## for (alfa in seq(0.25,0.50,0.01)){
+    ##   k=k+1
+    ##   era.loess <- loess(era ~ day, era.data, span=alfa)
+    ##   era.pred <- predict(era.loess)
+    ##   error[k] <- mean(abs(era.pred - era[1,1,1:216]))
+    ## }
+
+    # convert psl to daily anomalies with the LOESS filter:
+    pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+    pslPeriodClimLoess <- pslPeriodClim
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members)
+
+    ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+    ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+    ## s4.pred <- predict(s4.loess)
+    rm(pslPeriodClim)
+    gc()
+
+    psleuFull <- psleuFull - pslPeriodClim2
+    rm(pslPeriodClim2)
+    gc()
+
+    ## # convert psl to daily anomalies computed with the 10-day leadtime window including the 9 days AFTER each day:
+    ## n.leadtimes <- dim(psleuFull)[4]
+    ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon))
+    ## for(year in 1:n.years){
+    ##   for(lead in 1:(n.leadtimes-9)){
+    ##     pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+    ##   }
+    ## }
+
+    ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+    ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members)
+
+    ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2
+
+    ## psleuFull <- psleuFull10
+    ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean)
+    ## gc()
+
+    # select data only for the startmonth and leadmonth to study:
+    chosen.month <- start.month + lead.month # find which month we want to select data for
+    if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+    for(y in year.start:year.end){
+      leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+      leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+      if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+      int.leadtimes <- leadtime.min:leadtime.max
+      n.leadtimes <- leadtime.max - leadtime.min + 1
+
+      if(y == year.start) {
+        pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+      } else {
+        pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3))
+      }
+    }
+
+    ## # if you didn't convert the psl data to anomalies until now, you can convert it to monthly anomalies:
+    ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T)
+    ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members)
+
+    ## pslPeriod <- pslPeriod - pslPeriodClim2
+    ## rm(pslPeriodClim, pslPeriodClim2)
+    ## gc()
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+
+    #rm(pslPeriod)
+    gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- integer(0)
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
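+  # A toy example of the PC truncation applied below (a sketch with made-up data; 'toy'
+  # and 'n.keep' are illustrative names): keep the smallest number of PCs whose
+  # cumulative explained variance exceeds 'variance.explained':
+  ## toy <- matrix(rnorm(100*20), 100, 20)                    # 100 'days' x 20 'grid points'
+  ## toy.PCA <- princomp(toy, cor=FALSE)
+  ## cum.var <- cumsum(toy.PCA$sdev^2 / sum(toy.PCA$sdev^2))  # cumulative explained variance
+  ## n.keep <- which(cum.var > 0.8)[1]                        # first PC count above 80%
+  ## kmeans(toy.PCA$scores[,1:n.keep], centers=4, nstart=30)  # same call pattern as below
+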
+  # compute the PCs or not:
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the PCs that explain at least 80% of the variance
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+    if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30)
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE)
+      my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  # save the time series output of the cluster analysis:
+  save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_ClusterData.RData"))
+  rm(psl.kmeans)
+  gc()
+} # close the for loop on 'p'
+
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+  vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+  for (y in year.start:year.end){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+                latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+    rm(var)
+    gc()
+  }
+}
+
+## if(fields.name == ECMWF_S4.name) {
+##   if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+##   my.years <- year.start:year.end
+##   vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+## }
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+  sdates <- weekly.seq(forecast.year,mes,day)
+  vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:length(sdates)){
+    var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+    temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+    vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+    rm(temp,var)
+    gc()
+  }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
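+
+# The impact maps computed in the loop below compare, at each grid point, the var
+# anomalies of the days assigned to one regime against those of all days of the period
+# with a two-sample t-test; a single-point sketch with made-up numbers:
+## anom.all <- rnorm(900)             # anomalies of every day of the period at one point
+## anom.wr <- rnorm(200, mean=0.3)    # anomalies of the days assigned to one regime
+## t.test(anom.all, anom.wr)$p.value  # p < 0.05 -> dot drawn on the impact map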
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster[[p]]$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster[[p]]$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/period.length[[p]]
+    wr2y <- wr2y/period.length[[p]]
+    wr3y <- wr3y/period.length[[p]]
+    wr4y <- wr4y/period.length[[p]]
+
+    pslPeriod <- psleuFull[days.period[[p]],,]
+    pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+    pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+    rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+    gc()
+
+    pslwr1 <- pslPeriodAnom[wr1,,]
+    pslwr2 <- pslPeriodAnom[wr2,,]
+    pslwr3 <- pslPeriodAnom[wr3,,]
+    pslwr4 <- pslPeriodAnom[wr4,,]
+
+    rm(pslPeriodAnom)
+    gc()
+
+    pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+    pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+    pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+    pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+    # in the reanalysis case, we also compute the impact maps:
+
+    # similar selection as above, but for var instead of psl:
+    varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+    varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+    varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+    varPeriodAnom <- varPeriod - varPeriodClim2
+    rm(varPeriod, varPeriodClim, varPeriodClim2)
+    gc()
+
+    #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+    varPeriodAnom1 <- varPeriodAnom[wr1,,]
+    varPeriodAnom2 <- varPeriodAnom[wr2,,]
+    varPeriodAnom3 <- varPeriodAnom[wr3,,]
+    varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+    varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+    varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+    varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+    varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+    varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+    pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth1, varPeriodAnom1)
+    gc()
+
+    varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+    pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth2, varPeriodAnom2)
+    gc()
+
+    varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+    pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth3, varPeriodAnom3)
+    gc()
+
+    varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+    pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+    rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+    rm(varPeriodAnom)
+    gc()
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,WR.period,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+    rm(pvalue1,pvalue2,pvalue3,pvalue4)
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+    gc()
+
+  }
+
+  if(fields.name == ECMWF_S4.name){
+    cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]])
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum)
+    wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum)
+    wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum)
+    wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum)
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/(n.leadtimes*n.members)
+    wr2y <- wr2y/(n.leadtimes*n.members)
+    wr3y <- wr3y/(n.leadtimes*n.members)
+    wr4y <- wr4y/(n.leadtimes*n.members)
+
+    # in this case, we must use psl.melted instead of psleuFull. Both are already in anomalies:
+    # (problem: psl.melted depends on the startdate and leadtime, psleuFull doesn't)
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, WR.period, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    gc()
+
+    #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T)
+    #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T)
+    #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T)
+    #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T)
+
+    #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2)
+    #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2)
+
+    #n=0
+    #for(sy in 1:32) {
+    #  for(ld in 1:28){
+    #    if(my.cluster.array[[p]][ld,sy] == 1){
+    #      n=n+1
+    #      if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]}
+    #      temp <- temp + pslPeriod[1,2,sy,ld,,]
+    #    }
+    #  }
+    #}
+    #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F)
+
+    #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T)
+    #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T)
+    #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T)
+    #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T)
+
+  }
+
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    days.period[[p]] <- integer(0)
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+} # close the for loop on 'p'
+
+
+
+
+
diff --git a/old/weather_regimes_v32.R~ b/old/weather_regimes_v32.R~
new file mode 100644
index 0000000000000000000000000000000000000000..c5431cc792dd42f101d0299c8d118880a7fcb03e
--- /dev/null
+++ b/old/weather_regimes_v32.R~
@@ -0,0 +1,775 @@
+
+# Creation: 28/6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output is need by the script weather_regimes_maps.R
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(Kendall)
+library(reshape2)
+library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+
+# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean #forecast # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
in the maps + +year.start <- 1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +lat.weighting=FALSE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitud! + +var.num <- 1 # Choose a variable. 1: sfcWind 2: tas + +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +# Only for Reanalysis:: +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# only for Seasonal forecasts: +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 1 # select only the data of one lead month: [bypassed by the optional arguments of the script] + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. 
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two below variables: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} else { + start.month <- startM + lead.month <- leadM + WR.period <- start.month +} + +# in case the script is run with 1 argument, it is assumed you are using Reanalysis: +if(length(script.arg) == 1) WR.period <- script.arg[1] + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +} + +# Create a regular lat/lon grid to interpolate the data: +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) # Notice that the regular grid is always centered at lat=0 and lon=0! 
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it (notice that if the S4 grid is the same as the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = "psl", exp = list(list(path=fields)), sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data:
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+ psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only 1 month at a time (since each month needs ~3 GB of memory)
+ if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+ psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start:year.end, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$mod #, grid=my.grid, method='bilinear')
+
+ #Load(var = "tas", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates='19820101', dimnames=list(member="ensemble"), nmember=1, leadtimemax=1, storefreq='daily', output = 'lonlat', grid="r480x241", lonmin=20)
+
+}
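+# A worked example of the longitude selection above (a sketch, assuming the
+# 0.75-degree ERA-Interim grid, i.e. domain$lon = seq(0, 359.25, 0.75)):
+## lon.grid <- seq(0, 359.25, 0.75)
+## tail(which(45 - lon.grid >= 0), 1) # pos.lon.max = 61 -> longitudes 0 to 45E
+## which(lon.grid - 274.5 >= 0)[1] # pos.lon.min = 367 -> longitudes 274.5 to 359.25E
+## length(c(1:61, 367:480)) # 175 grid points, wrapping across Greenwich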
list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates='19820101', dimnames=list(member="ensemble"), nmember=1, leadtimemax=1, storefreq='daily', output = 'lonlat', grid="r480x241", lonmin=20) + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#p=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + # in this case of S4 we don't need to select anything because we already loaded only 1 month of data! 
+ if(fields.name == ECMWF_S4.name){ + + ## climatology plots: + + ## load eraint data: + ## ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' + ## my.years <- year.start:year.end + ## var <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), paste0(my.years,'0101'), storefreq = 'daily', leadtimemax = 216, output = 'lonlat',latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) # -273.16 for tas # or /100 for psl + + ## # check S4 lon and lat: + ## #varS4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(year.start, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=1, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) #, grid=my.grid, method='bilinear') + + ## # draw climatologies: + ## drift <- apply(psleuFull[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## #matplot(t(drift[1,,]),type="l", col=1:15, lty=2, pch=19, cex=.5) + ## #matplot(t(drift[1,,1:31]),type="b", col=1:15, lty=1, pch=19, cex=.5) # zoom over the first leadtime + + ## matplot(t(drift[1,,]),type="l", col="gray60", lty=3, pch=19, cex=.1) #, ylim=c(-25,5)) + + ## true.climate <- apply(psleuFull[,,,,1,1,drop=F], c(1,4), mean, na.rm=T) + ## lines(true.climate[1,],type="l", col="red", lty=1, pch=19, lwd=2) + + ## true.climate.5d <- stats::filter(true.climate[1,], rep(1/5,5), sides=2) + ## lines(true.climate.5d,type="l", col="orange", lty=1, pch=19, lwd=2) + + ## s4.data <- data.frame(s4=true.climate[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.50) + ## s4.pred <- predict(s4.loess) + ## lines(s4.pred,type="l", col="purple", lty=1, pch=19, lwd=2) + + ## col.monthly <- "brown" + ## true.climate1 <- apply(psleuFull[,,,1:31,1,1,drop=F], 1, mean, na.rm=T) + ## lines(rep(true.climate1,31),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate2 <- apply(psleuFull[,,,32:59,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,31),rep(true.climate2,28)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate3 <- apply(psleuFull[,,,60:90,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,59),rep(true.climate3,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate4 <- apply(psleuFull[,,,91:120,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,90),rep(true.climate4,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate5 <- apply(psleuFull[,,,121:151,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,120),rep(true.climate5,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate6 <- apply(psleuFull[,,,151:180,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,150),rep(true.climate6,30)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + ## true.climate7 <- apply(psleuFull[,,,181:211,1,1,drop=F], 1, mean, na.rm=T) + ## lines(c(rep(NA,180),rep(true.climate7,31)),type="l", col=col.monthly, lty=1, pch=19, lwd=2) + + + ## era <- apply(var$obs[,,,,1,1,drop=F], c(1,2,4), mean, na.rm=T) + ## plot(era[1,1,1:216],type="l", col="black", lty=1, pch=19, lwd=2) + + ## era.5d <- stats::filter(era[1,1,1:216], rep(1/5,5), sides=2) + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## era.loess <- loess(era ~ day, era.data, span=0.50) + ## era.pred <- predict(era.loess) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + + ## era.data2 <- data.frame(era=era[1,1,1:216], 
day=1:216)
+ ## era.loess2 <- loess(era ~ day, era.data2, span=0.3)
+ ## era.pred2 <- predict(era.loess2)
+ ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2)
+
+ ## era.data3 <- data.frame(era=era[1,1,1:216], day=1:216)
+ ## era.loess3 <- loess(era ~ day, era.data3, span=0.35)
+ ## era.pred3 <- predict(era.loess3)
+
+ ## col.monthly.erai <- "turquoise3"
+ ## era1 <- apply(var$obs[,,,1:31,1,1,drop=F], c(1,2), mean, na.rm=T)
+ ## lines(rep(era1,31),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+ ## era2 <- apply(var$obs[,,,32:59,1,1,drop=F], c(1,2), mean, na.rm=T)
+ ## lines(c(rep(NA,31),rep(era2,28)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+ ## era3 <- apply(var$obs[,,,60:90,1,1,drop=F], c(1,2), mean, na.rm=T)
+ ## lines(c(rep(NA,59),rep(era3,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+ ## era4 <- apply(var$obs[,,,91:120,1,1,drop=F], c(1,2), mean, na.rm=T)
+ ## lines(c(rep(NA,90),rep(era4,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+ ## era5 <- apply(var$obs[,,,121:151,1,1,drop=F], c(1,2), mean, na.rm=T)
+ ## lines(c(rep(NA,120),rep(era5,31)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+ ## era6 <- apply(var$obs[,,,151:180,1,1,drop=F], c(1,2), mean, na.rm=T)
+ ## lines(c(rep(NA,150),rep(era6,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+ ## era7 <- apply(var$obs[,,,181:211,1,1,drop=F], c(1,2), mean, na.rm=T)
+ ## lines(c(rep(NA,180),rep(era7,30)),type="l", col=col.monthly.erai, lty=1, pch=19, lwd=2)
+
+
+ ## # anomalies:
+ ## point.type <- "l"
+ ## true.climate.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate[1,]
+ ## plot(true.climate.anom,type=point.type, col="red", lty=1, pch=19, lwd=2, cex=.3)
+ ## lines(rep(0,216))
+
+ ## true.climate.5d.anom <- psleuFull[1,1,pos.last.year,,1,1] - true.climate.5d
+ ## lines(true.climate.5d.anom,type=point.type, col="orange", lty=1, pch=19, lwd=2, cex=.3)
+
+ ## s4.pred.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.pred
+ ## lines(s4.pred.anom,type=point.type, col="purple", lty=1, pch=19, lwd=2, cex=.3)
+
+ ## s4.monthly.clim <- c(rep(true.climate1,31),rep(true.climate2,28),rep(true.climate3,31),rep(true.climate4,30),rep(true.climate5,31),rep(true.climate6,30),rep(true.climate7,31), rep(NA,4))
+ ## s4.monthly.anom <- psleuFull[1,1,pos.last.year,,1,1] - s4.monthly.clim
+ ## lines(s4.monthly.anom,type=point.type, col="brown", lty=1, pch=19, lwd=2, cex=.3)
+
+
+ ## pos.last.year <- dim(var$obs)[3]
+ ## era.anom <- var$obs[1,1,pos.last.year,1:216,1,1] - era[1,1,1:216]
+ ## plot(era.anom,type=point.type, col="black", lty=1, pch=19, lwd=2)
+ ## lines(rep(0,216))
+
+ ## era.anom.5d <- var$obs[1,1,pos.last.year,1:216,1,1] - era.5d[1:216]
+ ## lines(era.anom.5d,type=point.type, col="darkgreen", lty=1, pch=19, lwd=2)
+
+ ## era.anom.pred <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred[1:216]
+ ## lines(era.anom.pred,type=point.type, col="blue", lty=1, pch=19, lwd=2)
+
+ ## era.anom.pred2 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred2[1:216]
+ ## era.anom.pred3 <- var$obs[1,1,pos.last.year,1:216,1,1] - era.pred3[1:216]
+
+ ## era.monthly.clim <- c(rep(era1,31),rep(era2,28),rep(era3,31),rep(era4,30),rep(era5,31),rep(era6,30),rep(era7,31), rep(NA,4))
+ ## era.anom.monthly <- var$obs[1,1,pos.last.year,1:216,1,1] - era.monthly.clim
+ ## lines(era.anom.monthly,type=point.type, col="turquoise3", lty=1, pch=19, lwd=2)
+
+ ## # Cloud plot:
+ ## plot(era.anom.pred,type="l", col="blue", lty=1, pch=19, lwd=2)
+ ## grid(nx=10,lwd=2)
+ ## lines(era.anom.pred2,type="l",
col="purple", lty=1, pch=19, lwd=2) + ## lines(era.anom.pred3,type="l", col="turquoise3", lty=1, pch=19, lwd=2) + ## lines(era.anom.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + + ## lines(era.5d,type="l", col="darkgreen", lty=1, pch=19, lwd=2) + ## lines(era.pred,type="l", col="blue", lty=1, pch=19, lwd=2) + ## lines(rep(0,216)) + + ## lines(era.pred2,type="l", col="purple", lty=1, pch=19, lwd=2) + ## lines(era.pred3,type="l", col="", lty=1, pch=19, lwd=2) + + + ## # fit alfa parameter for each grid point: + ## i=1 + ## j=1 + ## era <- apply(var$obs[,,,,i,j,drop=F], c(1,2,4), mean, na.rm=T) + ## era.data <- data.frame(era=era[1,1,1:216], day=1:216) + ## k=0; error <- c() + ## for (alfa in seq(0.25,0.50,0.01)){ + ## k=k+1 + ## era.loess <- loess(era ~ day, era.data, span=alfa) + ## era.pred <- predict(era.loess) + ## error[k] <- mean(abs(era.pred - era[1,1,1:216])) + ## } + + # convert psl in daily anomalies with the LOESS filter: + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + rm(pslPeriodClim) + gc() + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + for(y in year.start:year.end){ + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. 
+ # select data only for the chosen start month and lead month:
+ chosen.month <- start.month + lead.month # find the month from which to select the data
+ if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+ for(y in year.start:year.end){
+ leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+ leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+ if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+ int.leadtimes <- leadtime.min:leadtime.max
+ n.leadtimes <- leadtime.max - leadtime.min + 1
+
+ if(y == year.start) {
+ pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+ } else {
+ pslPeriod <- unname(abind(pslPeriod, psleuFull[,,y-year.start+1,int.leadtimes,,,drop=FALSE], along=3))
+ }
+ }
+
+
+ ## # if you didn't convert the psl data into anomalies until now, you can convert it into monthly anomalies:
+ ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T)
+ ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members)
+
+ ## pslPeriod <- pslPeriod - pslPeriodClim2
+ ## rm(pslPeriodClim, pslPeriodClim2)
+ ## gc()
+
+ # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+ psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+
+ #rm(pslPeriod)
+ gc()
+ }
+
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period
+
+ days.period <- list() # get which days inside psleuFull belong to the chosen period
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+ }
+
+ # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+ # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+ }
+
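+ ## A minimal sketch of the melt()/acast() reshape used in the S4 branch above,
+ ## on a toy 2x2x2 array instead of the real Member/StartYear/LeadDay/Lat/Lon one (reshape2):
+ ## a <- array(1:8, c(2,2,2))
+ ## m <- melt(a, varnames=c("Member","LeadDay","Lon"))
+ ## acast(m, Member + LeadDay ~ Lon) # 4 rows (Member_LeadDay) x 2 columns (Lon)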
+
+ # compute the PCs or not:
+ if(PCA){
+ my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+ pslcut <- psl.kmeans[,my.seq]
+
+ my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+ tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep, based on the cumulative sum of their explained variance
+ n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the smallest number of PCs explaining at least the chosen fraction of variance (e.g. 80%)
+ my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+ rm(pslcut)
+ } else { # 4 is the number of clusters:
+ if(fields.name == rean.name) my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30)
+ if(fields.name == ECMWF_S4.name) {
+ my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE)
+ my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+
+ #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+ }
+ }
+
+ rm(psl.kmeans)
+ gc()
+}
+
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+ vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+## if(fields.name == ECMWF_S4.name) {
+## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+## my.years <- year.start:year.end
+## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear')
+## }
+
+if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case:
+ sdates <- weekly.seq(forecast.year,mes,day)
+ vareuFull <- array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon))
+
+ for (startdate in 1:length(sdates)){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+ vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+ rm(temp,var)
+ gc()
+ }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
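+# A minimal sketch of the PCA + k-means step above, on random data (assuming the
+# same 0.8 variance threshold and 4 clusters; princomp() as in the script):
+## set.seed(1)
+## x <- matrix(rnorm(300*20), 300, 20) # 300 "days" x 20 "grid points"
+## pca <- princomp(x, cor=FALSE)
+## expl <- cumsum(pca$sdev^2 / sum(pca$sdev^2)) # cumulative explained variance
+## npc <- which(expl > 0.8)[1] # smallest number of PCs above 80%
+## cl <- kmeans(pca$scores[, 1:npc], centers=4, iter.max=100, nstart=30)
+## table(cl$cluster) # number of days assigned to each regime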
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+ if(fields.name == rean.name){
+ cluster.sequence <- my.cluster[[p]]$cluster
+
+ if(sequences){ # it works only for reanalysis data!
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+ # warning: the first 4 and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+ wrdiff <- diff(my.cluster[[p]]$cluster)
+
+ sequ <- rep(FALSE, n.days.period[[p]])
+ for(i in 5:(n.days.period[[p]]-5)){
+ # day i belongs to a sequence unless two regime changes wrdiff[i-k] and wrdiff[i+m] (with k+m <= 4) surround it:
+ sequ[i] <- TRUE
+ for(k in 0:4){
+ for(m in 0:(4-k)){
+ if(k+m >= 1 && wrdiff[i-k] != 0 && wrdiff[i+m] != 0) sequ[i] <- FALSE
+ }
+ }
+ } # close for loop on i
+
+ # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+ cluster.sequence <- my.cluster[[p]]$cluster * sequ
+ rm(wrdiff, sequ)
+ }
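+ ## An equivalent, more compact way to flag days inside runs of >= 5 equal
+ ## regimes is rle(); a sketch on a toy sequence (edge handling differs
+ ## slightly from the wrdiff-based filter above):
+ ## x <- c(1,1,1,1,1,2,3,3,3,3,3,3,4) # toy daily regime sequence
+ ## keep <- rep(rle(x)$lengths >= 5, rle(x)$lengths) # TRUE only inside runs of length >= 5
+ ## x * keep # 1 1 1 1 1 0 3 3 3 3 3 3 0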
+
+ # select the days belonging to each regime (if 'sequences' is TRUE, only the days belonging to a sequence of at least 5 days of that regime):
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ # yearly frequency of each regime:
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4))
+ }
+
+ # convert the counts to relative frequencies:
+ wr1y <- wr1y/period.length[[p]]
+ wr2y <- wr2y/period.length[[p]]
+ wr3y <- wr3y/period.length[[p]]
+ wr4y <- wr4y/period.length[[p]]
+
+ pslPeriod <- psleuFull[days.period[[p]],,]
+ pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+ pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]])
+ pslPeriodAnom <- pslPeriod - pslPeriodClim2
+
+ rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+ gc()
+
+ pslwr1 <- pslPeriodAnom[wr1,,]
+ pslwr2 <- pslPeriodAnom[wr2,,]
+ pslwr3 <- pslPeriodAnom[wr3,,]
+ pslwr4 <- pslPeriodAnom[wr4,,]
+
+ rm(pslPeriodAnom)
+ gc()
+
+ pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+ pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+ pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+ pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+ # in the reanalysis case, we also compute the impact maps:
+
+ # similar selection as above, but for var instead of psl:
+ varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period
+
+ varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+ varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]])
+ varPeriodAnom <- varPeriod - varPeriodClim2
+ rm(varPeriod, varPeriodClim, varPeriodClim2)
+ gc()
+
+ #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period
+ varPeriodAnom1 <- varPeriodAnom[wr1,,]
+ varPeriodAnom2 <- varPeriodAnom[wr2,,]
+ varPeriodAnom3 <- varPeriodAnom[wr3,,]
+ varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+ varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+ varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+ varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+ varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+ varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+ pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth1, varPeriodAnom1)
+ gc()
+
+ varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+ pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth2, varPeriodAnom2)
+ gc()
+
+ varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+ pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth3, varPeriodAnom3)
+ gc()
+
+ varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+ pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value)
+ rm(varPeriodAnomBoth4, varPeriodAnom4)
+
+ rm(varPeriodAnom)
+ gc()
+
+ # save all the data necessary to redraw the graphs when we know the right regime:
+ save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,WR.period,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData"))
+
+ rm(pvalue1,pvalue2,pvalue3,pvalue4)
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean)
+ gc()
+
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]])
+
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum)
+ wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum)
+ wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum)
+ wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum)
+
+ # convert the counts to relative frequencies:
+ wr1y <- wr1y/(n.leadtimes*n.members)
+ wr2y <-
wr2y/(n.leadtimes*n.members) + wr3y <- wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. Both are already in anomalies: + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, WR.period, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+ #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period <- period.length <- list()
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+ }
+
+} # close the for loop on 'p'
+
+
+
+
+
diff --git a/old/weather_regimes_v33.R b/old/weather_regimes_v33.R
new file mode 100644
index 0000000000000000000000000000000000000000..6f0542f23f6ab9ad39752e749400021c42e903d2
--- /dev/null
+++ b/old/weather_regimes_v33.R
@@ -0,0 +1,666 @@
+
+# Creation: 28/6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output is needed by the script weather_regimes_maps.R
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2)
+#library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean # put 'rean' to load pressure fields and var from reanalysis, put 'forecast' to load them from forecast systems
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+res <- 0.75 # set the resolution at which to interpolate the psl and var data when loading them (e.g.: set it to 0.75 to use the same res as ERA-Interim)
+ # interpolation method is BILINEAR.
+
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data on latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days of the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Only for Reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+startM <- 2 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 1 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data
+ # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+if(fields.name == forecast.name) WR.period = 1
+n.years <- year.end - year.start + 1
+
+# in case the script is run with two arguments, they are assigned to the two variables below:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 2){
+ start.month <- script.arg[1]
+ lead.month <- script.arg[2]
+ WR.period <- start.month
+} else {
+ start.month <- startM
+ lead.month <- leadM
+ WR.period <- start.month
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1) WR.period <- script.arg[1]
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than or equal to lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+ days.period[[pp]] <- NA
+ for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+ days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning, introduced only to be able to execute the command above
+ # number of days belonging to that period from year.start to year.end:
+ n.days.period[[pp]] <- length(days.period[[pp]])
+ # Number of days belonging to that period in a single year:
+ period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!)
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 grid is the same as the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+if(fields.name == rean.name){ # Load daily psl data in the reanalysis case:
+ psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+if(fields.name == ECMWF_S4.name){ # in this case, we can load the data for only 1 month at a time (since each month needs ~3 GB of memory)
+ if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+ #psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1, grid=my.grid, method='bilinear')$mod # not working
+
+ fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+ fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+ # keep only the years whose netCDF files have the expected number of leadtimes, members and grid points:
+ years <- c()
+ for(y in year.start:year.end){
+ fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+ num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # alternative version for SMP!
+ num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+ num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+ num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+ if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+ }
+
+ n.years <- length(years)
+
+ psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, '0201'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=4)$mod
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+ # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates)
+
+ psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+ # Select only the data of the month/season we want:
+ if(fields.name == rean.name){
+ pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g.: winter)
+
+ # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+ if(lat.weighting){
+ lat.weighted <- cos(lat*pi/180)^0.5
+ lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+ pslPeriod <- pslPeriod * lat.weighted.array
+ rm(lat.weighted.array)
+ gc()
+ }
+
+ # reshape the period data to use it as input of the cluster analysis:
+ psl.kmeans <- pslPeriod
+ dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+ rm(pslPeriod)
+ }
+
+ # in the case of S4 we don't need to select anything because we already loaded only 1 month of data!
+ if(fields.name == ECMWF_S4.name){
+
+ # convert psl into daily anomalies with the LOESS filter:
+ pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClimLoess <- pslPeriodClim
+ for(i in 1:n.pos.lat){
+ for(j in 1:n.pos.lon){
+ my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(pslPeriodClim)
+ gc()
+
+ pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members)
+
+ ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+ ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+ ## s4.pred <- predict(s4.loess)
+
+ psleuFull <- psleuFull - pslPeriodClim2
+ rm(pslPeriodClim2)
+ gc()
+
+ ## # convert psl into daily anomalies computed with the 10-day leadtime window including the 9 days AFTER each day:
+ ## n.leadtimes <- dim(psleuFull)[4]
+ ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon))
+ ## for(year in 1:n.years){
+ ## for(lead in 1:(n.leadtimes-9)){
+ ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+ ## }
+ ## }
+
+ ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+ ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members)
+
+ ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2
+
+ ## psleuFull <- psleuFull10
+ ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean)
+ ## gc()
+
+ # select data only for the startmonth and leadmonth to study:
+ chosen.month <- start.month + lead.month # find the month from which to select the data
+ if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+ i=0
+ for(y in years){
+ i=i+1
+ leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+ leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+ if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+ int.leadtimes <- leadtime.min:leadtime.max
+ n.leadtimes <- leadtime.max - leadtime.min + 1
+
+ if(i == 1) { # use the first available year (year.start itself may have been discarded by the check above)
+ pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+ } else {
+ pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+ }
+ }
+
+
+ ## # if you didn't convert the psl data into anomalies until now, you can convert it into monthly anomalies:
+ ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T)
+ ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members)
+
+ ## pslPeriod <- pslPeriod - pslPeriodClim2
+ ## rm(pslPeriodClim, pslPeriodClim2)
+ ## gc()
+
+ # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ...
, year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name){ + my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_ClusterData.RData")) + } + + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA,tot.variance,n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +## 
if(fields.name == ECMWF_S4.name) { +## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + +## my.years <- year.start:year.end +## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear') +## } + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for rean data!!! 
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- 
InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,WR.period,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, years, n.years, WR.period, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+   my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+   #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+   days.period <- period.length <- list()
+   for(startdate in my.startdates.period){
+     days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+   }
+   period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+   # yearly frequency of each regime:
+   wr1y <- wr2y <- wr3y <- wr4y <- c()
+   for(y in year.start:year.end){
+     wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+     wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+     wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+     wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+   }
+ }
+
+} # close the for loop on 'p'
+
+
diff --git a/old/weather_regimes_v33.R~ b/old/weather_regimes_v33.R~
new file mode 100644
index 0000000000000000000000000000000000000000..1be9ca69f4dca2e5cd784e71504a839f6b92098c
--- /dev/null
+++ b/old/weather_regimes_v33.R~
@@ -0,0 +1,662 @@
+
+# Creation: 28/6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output is needed by the script weather_regimes_maps.R
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2)
+#library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean # set to 'rean' to load pressure fields and var from a reanalysis, or to 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+res <- 0.75 # resolution to which the psl and var data are interpolated when loading them (i.e.: set to 0.75 to use the same res as ERA-Interim)
+ # interpolation method is BILINEAR.
+
+PCA <- FALSE # set it to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise (a standalone sketch of this step follows a few lines below)
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, i.e. 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+var.num <- 1 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Only for Reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+startM <- 2 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 1 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
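+# Minimal standalone sketch of the optional PCA + k-means step enabled by 'PCA' above.
+# Everything here is synthetic and illustrative (a random 300-days x 50-gridpoints
+# matrix with toy names); only the 0.8 variance threshold and the 4 clusters mirror
+# the settings of this script:
+#   set.seed(1)
+#   psl.toy <- matrix(rnorm(300*50), nrow=300, ncol=50)
+#   toy.PCA <- princomp(psl.toy, cor=FALSE)
+#   tot.var.toy <- cumsum(toy.PCA$sdev^2 / sum(toy.PCA$sdev^2)) # cumulative fraction of explained variance
+#   n.pcs.toy <- which(tot.var.toy > 0.8)[1]                    # smallest number of leading PCs explaining at least 80%
+#   cl.toy <- kmeans(toy.PCA$scores[, 1:n.pcs.toy], centers=4, iter.max=100, nstart=30)
+#   table(cl.toy$cluster)                                       # sizes of the 4 toy clusters
+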
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+ # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+if(fields.name == forecast.name) WR.period = 1
+n.years <- year.end - year.start + 1
+
+# in case the script is run with two arguments, they are assigned to the two variables below:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 2){
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+} else {
+  start.month <- startM
+  lead.month <- leadM
+  WR.period <- start.month
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1) WR.period <- script.arg[1]
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 data is already on the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! (a toy illustration of this wrap-around selection follows)
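+
+# Toy illustration of the wrap-around longitude selection above, for a box that
+# crosses the Greenwich meridian (synthetic 1-degree grid; all names and values
+# here are illustrative only):
+#   lon.toy <- 0:359                                     # grid longitudes, in degrees East
+#   pos.lon.max.toy <- tail(which(45 - lon.toy >= 0), 1) # last point east of Greenwich inside the box (lon <= 45)
+#   pos.lon.min.toy <- which(lon.toy - 274.5 >= 0)[1]    # first point west of Greenwich inside the box (lon >= 274.5, i.e. 85.5W)
+#   pos.lon.toy <- c(1:pos.lon.max.toy, pos.lon.min.toy:length(lon.toy))
+#   lon.toy[pos.lon.toy]                                 # 0..45 followed by 275..359: the two arcs of the study box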
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + #psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1, grid=my.grid, method='bilinear')$mod # not working + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + } + + n.years <- length(years) + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, '0201'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! 
+ if(fields.name == ECMWF_S4.name){ + + # convert psl in daily anomalies with the LOESS filter: + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + # select data only for the startmonth and leadmonth to study: + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + n.leadtimes <- leadtime.max - leadtime.min + 1 + + if(y == year.start) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + + ## # if you didn't convert psl data in anomalies until now, you can convert psl data in monthly anomalies: + ## pslPeriodClim <- apply(pslPeriod, c(1,5,6), mean, na.rm=T) + ## pslPeriodClim2 <- InsertDim(InsertDim(InsertDim(pslPeriodClim,2,n.leadtimes), 2, n.years), 2, n.members) + + ## pslPeriod <- pslPeriod - pslPeriodClim2 + ## rm(pslPeriodClim, pslPeriodClim2) + ## gc() + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... 
, year2day28 + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + + #rm(pslPeriod) + gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the PCs or not: + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name){ + my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_ClusterData.RData")) + } + + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(n.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA,tot.variance,n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +## 
if(fields.name == ECMWF_S4.name) { +## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + +## my.years <- year.start:year.end +## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear') +## } + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for rean data!!! 
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- 
InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,WR.period,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(n.leadtimes*n.members) + wr2y <- wr2y/(n.leadtimes*n.members) + wr3y <- wr3y/(n.leadtimes*n.members) + wr4y <- wr4y/(n.leadtimes*n.members) + + # in this case, must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, WR.period, pslwr1mean, pslwr2mean,pslwr3mean,pslwr4mean,wr1y,wr2y,wr3y,wr4y,n.leadtimes,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",var.name[var.num],"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + #pslwr1meanPartial <- apply(pslwr1, c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslwr2, c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslwr3, c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslwr4, c(1,3,4), mean, na.rm=T) + + #PlotEquiMap(rescale(pslwr1meanPartial[1,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + #PlotEquiMap(rescale(pslwr1meanPartial[2,,],my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2) + + #n=0 + #for(sy in 1:32) { + # for(ld in 1:28){ + # if(my.cluster.array[[p]][ld,sy] == 1){ + # n=n+1 + # if(n == 1) {temp <- pslPeriod[1,2,sy,ld,,]} + # temp <- temp + pslPeriod[1,2,sy,ld,,] + # } + # } + #} + #PlotEquiMap(rescale(temp/n,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, drawleg=F) + + #pslwr1meanPartial <- apply(pslmat[,wr1,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr2meanPartial <- apply(pslmat[,wr2,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr3meanPartial <- apply(pslmat[,wr3,,,drop=F], c(1,3,4), mean, na.rm=T) + #pslwr4meanPartial <- apply(pslmat[,wr4,,,drop=F], c(1,3,4), mean, na.rm=T) + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+   my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+   #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+   days.period <- period.length <- list()
+   for(startdate in my.startdates.period){
+     days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+   }
+   period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+   # yearly frequency of each regime:
+   wr1y <- wr2y <- wr3y <- wr4y <- c()
+   for(y in year.start:year.end){
+     wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+     wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+     wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+     wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+   }
+ }
+
+} # close the for loop on 'p'
+
+
diff --git a/old/weather_regimes_v34.R b/old/weather_regimes_v34.R
new file mode 100644
index 0000000000000000000000000000000000000000..8374248c0f9bb887dceeaac9f68ee8499b323d20
--- /dev/null
+++ b/old/weather_regimes_v34.R
@@ -0,0 +1,727 @@
+
+# Creation: 28/6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output is needed by the script weather_regimes_maps.R
+# Branch: weather_regimes
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken! (a small self-contained example of the reshaping it provides follows)
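+
+# Small self-contained example of the reshape2 melt()/acast() pattern this script
+# uses to flatten a (exp, member, startyear, leadday, lat, lon) array into the
+# days-by-gridpoints matrix expected by kmeans() (toy dimensions and names only):
+#   arr.toy <- array(rnorm(1*2*3*4*2*2), c(1,2,3,4,2,2)) # Exp x Member x StartYear x LeadDay x Lat x Lon
+#   melted.toy <- melt(arr.toy, varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+#   flat.toy <- unname(acast(melted.toy, Member + StartYear + LeadDay ~ Lat + Lon))
+#   dim(flat.toy)                                        # 24 rows (2*3*4 pseudo-days) x 4 columns (2*2 grid points)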
+#library(TTR)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir
+# module load NCO # load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- forecast #rean # set to 'rean' to load pressure fields and var from a reanalysis, or to 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2013 #2013 #2010
+
+res <- 0.75 # resolution to which the psl and var data are interpolated when loading them (i.e.: set to 0.75 to use the same res as ERA-Interim)
+ # interpolation method is BILINEAR.
+
+PCA <- FALSE # set it to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, i.e. 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+var.num <- 2 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Only for Reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+startM <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 3 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+ # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+if(fields.name == forecast.name) WR.period = 1
+n.years <- year.end - year.start + 1
+
+# debug leftovers, disabled: 'my.period', 'p' and 'lead.month' are not defined yet at this point
+#cat("workdir\n")
+#save(workdir,rean.name,fields.name, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData"))
+#if(PCA == TRUE){
+
+# in case the script is run with two arguments, they are assigned to the two variables below:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) >= 2){
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+} else {
+  start.month <- startM
+  lead.month <- leadM
+  WR.period <- start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script on the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar functions
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1) WR.period <- script.arg[1]
+
+var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts)
+var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+days.period <- n.days.period <- period.length <- list()
+for (pp in 1:17){
+  days.period[[pp]] <- NA
+  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  # number of days belonging to that period from year.start to year.end:
+  n.days.period[[pp]] <- length(days.period[[pp]])
+  # number of days belonging to that period in a single year:
+  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+cat("Loading lat/lon. Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 data is already on the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
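+
+# The availability screen further below shells out to NCO's `ncks -m` and parses the
+# 'psl size' line to recover the four dimension sizes of each file. A minimal sketch
+# of that pattern, wrapped in a helper (hypothetical name; it assumes, as the pipeline
+# below does, that `ncks -m` prints a line like "psl size = lead*member*lat*lon"):
+#   get.dim.sizes <- function(nc.file, var.nc = "psl") {
+#     sizes <- sapply(1:4, function(k)
+#       as.integer(system(paste0("ncks -m ", nc.file,
+#                                " | grep -E -i '", var.nc, " size' | cut -d '=' -f2 | cut -d '*' -f", k),
+#                         intern = TRUE)))
+#     setNames(sizes, c("leadtimes", "members", "lat", "lon")) # order implied by the checks below
+#   }
+#   # example call (hypothetical file name):
+#   # get.dim.sizes("/esnas/exp/ecmwf/system4_m1/daily_mean/psl_f6h/psl_19810901.nc")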
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. Please wait...\n") + +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + #psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1, grid=my.grid, method='bilinear')$mod # not working + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + } + + n.years <- length(years) + + #Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates=paste0(1982:2013,'0101'), nmember=15, leadtimemax=216, storefreq='daily', output = 'lonlat', grid=my.grid, nprocs=1) + + + psleuFull <- 
Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+ # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates)
+ psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+#WR.period=1 # for debugging
+for(p in WR.period){
+ # Select only the data of the month/season we want:
+ if(fields.name == rean.name){
+ pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (e.g. winter)
+
+ # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+ if(lat.weighting){
+ lat.weighted <- cos(lat*pi/180)^0.5
+ lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+ pslPeriod <- pslPeriod * lat.weighted.array # overwrite pslPeriod so the weighting is kept below
+ rm(lat.weighted.array)
+ gc()
+ }
+
+ # select period data from psleuFull to use it as input of the cluster analysis:
+ psl.kmeans <- pslPeriod
+ dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+ rm(pslPeriod)
+ }
+
+ # in case of S4 we don't need to select anything because we already loaded only 1 month of data!
+ if(fields.name == ECMWF_S4.name){
+
+ # convert psl into daily anomalies with the LOESS filter:
+ cat("Calculating anomalies. 
Please wait...\n")
+ pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClimLoess <- pslPeriodClim
+ for(i in 1:n.pos.lat){ # loop over all the grid points of the domain
+ for(j in 1:n.pos.lon){
+ my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(pslPeriodClim)
+ gc()
+
+ pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members)
+
+ ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+ ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+ ## s4.pred <- predict(s4.loess)
+
+ psleuFull <- psleuFull - pslPeriodClim2
+ rm(pslPeriodClim2)
+ gc()
+
+ ## # convert psl into daily anomalies computed with the 10-day leadtime window including the 9 days AFTER each day:
+ ## n.leadtimes <- dim(psleuFull)[4]
+ ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon))
+ ## for(year in 1:n.years){
+ ## for(lead in 1:(n.leadtimes-9)){
+ ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10
+ ## }
+ ## }
+
+ ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T)
+ ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members)
+
+ ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2
+
+ ## psleuFull <- psleuFull10
+ ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean)
+ ## gc()
+
+ # select data only for the start month and lead month to study:
+ cat("Extracting data. Please wait...\n")
+ chosen.month <- start.month + lead.month # find which month we want to select data from
+ if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+ i=0
+ for(y in years){
+ i=i+1
+ leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+ leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+ if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+ int.leadtimes <- leadtime.min:leadtime.max
+ num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+ # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+ if(y == years[1]) {
+ pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+ } else {
+ pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+ }
+ }
+
+ # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+ gc()
+ cat("Preformatting data. Please wait...\n")
+ psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+ gc()
+ cat("Preformatting data. Please wait......\n")
+
+ # this function uses a lot of memory!!!
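+ # (a lower-memory base-R sketch of the same reshape — hypothetical, assuming pslPeriod
+ # keeps the (Exp,Member,StartYear,LeadDay,Lat,Lon) order built above, so rows run with
+ # LeadDay varying fastest and Member slowest, as in the acast() call below:
+ # tmp <- aperm(pslPeriod[1,,,,,], c(3,2,1,5,4)) # -> (LeadDay,StartYear,Member,Lon,Lat)
+ # dim(tmp) <- c(prod(dim(tmp)[1:3]), prod(dim(tmp)[4:5])) # days x grid points
+ # psl.kmeans <- unname(tmp); rm(tmp) )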
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name){ + my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_ClusterData.RData")) + } + + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(num.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA,tot.variance,n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, 
n.pos.lon))
+
+ for (startdate in 1:length(sdates)){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T)
+ vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp
+ rm(temp,var)
+ gc()
+ }
+
+}
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData"))
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+ if(fields.name == rean.name){
+ cluster.sequence <- my.cluster[[p]]$cluster
+
+ if(sequences){ # it works only for reanalysis data!!!
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+ # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+ wrdiff <- diff(my.cluster[[p]]$cluster)
+
+ sequ <- rep(FALSE, n.days.period[[p]])
+ for(i in 5:(n.days.period[[p]]-5)){
+ sequ[i] <- TRUE
+ if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ } # close for loop on i
+
+ # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+ cluster.sequence <- my.cluster[[p]]$cluster * sequ
+ rm(wrdiff, sequ)
+ }
+
+ # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ # yearly frequency of each regime:
+ wr1y <- wr2y <- wr3y <-wr4y <- c()
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc,
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3))
+ 
wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(lon, lon.max, lon.min, lat, lat.max, lat.min, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,WR.period,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, 
varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. Please wait...\n") + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
Both are already anomalies:
+ # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+ gc()
+ pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+ #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+ gc()
+
+ #for(a in 1:2){
+ # for(b in 1:3){
+ # for(c in 1:4){
+ # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+ # }
+ # }
+ #}
+
+ pslwr1 <- pslmat[wr1,,, drop=F]
+ pslwr2 <- pslmat[wr2,,, drop=F]
+ pslwr3 <- pslmat[wr3,,, drop=F]
+ pslwr4 <- pslmat[wr4,,, drop=F]
+
+ pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+ pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+ pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+ pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ rm(pslmat)
+ gc()
+
+ # save all the data necessary to draw the graphs (but not the impact maps):
+ save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, years, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, cluster.sequence,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData"))
+
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ gc()
+
+ }
+
+ # for the monthly system, the time order is always: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+ #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period <- period.length <- list()
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+ wr1y <- wr2y <- wr3y <-wr4y <- c()
+ for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+ }
+
+ cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Load var data:
+if(fields.name == rean.name){ # Load daily var data in the reanalysis case:
+ vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+ for (y in year.start:year.end){
+ var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+ }
+}
+
+## if(fields.name == ECMWF_S4.name) {
+## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+## 
my.years <- year.start:year.end +## vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod #, grid=my.grid, method='bilinear') +## } + + + +} diff --git a/old/weather_regimes_v34.R~ b/old/weather_regimes_v34.R~ new file mode 100644 index 0000000000000000000000000000000000000000..d144947ee84d1d365f2b673d872f79dc0d253811 --- /dev/null +++ b/old/weather_regimes_v34.R~ @@ -0,0 +1,726 @@ + +# Creation: 28/6/2016 +# Author: Nicola Cortesi +# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system +# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder. +# Assumption: its output is need by the script weather_regimes_maps.R +# Branch: weather_regimes + +library(s2dverification) # for the function Load() +library(abind) +library(reshape2) # after each undate of s2dverification, it's better to remove and install this package again because sometimes it gets broken! +#library(TTR) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast #rean # put 'rean' to load pressure fields and var from reanalisis, put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2013 #2013 #2010 + +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. 
+
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+lat.weighting=FALSE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+sequences=FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+var.num <- 2 # Choose a variable. 1: sfcWind 2: tas
+
+var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("Wind Speed","Temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+# Only for Reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+startM <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 3 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data
+ # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. 
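+ # (a possible general rule for this shift, not used in this script: one week for every
+ # 7 days the leadtime window is displaced, e.g. startdate.shift <- ceiling((leadtime[1]-4)/7),
+ # which gives 0 for leadtime=1:7, 1 for leadtime=5:11 and 2 for leadtime=12:18)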
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +if(fields.name == forecast.name) WR.period = 1 +n.years <- year.end - year.start + 1 + +# in case the script is run with two arguments, they are assigned to the two variables below: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) >= 2){ + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} else { + start.month <- startM + lead.month <- leadM + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1) WR.period <- script.arg[1] + +var.data <- fields # dataset from which to load the input var data (reanalysis or forecasts) +var.data.name <- fields.name #ECMWF_monthly.name # ERAint.name + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +days.period <- n.days.period <- period.length <- list() +for (pp in 1:17){ + days.period[[pp]] <- NA + for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) + days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command + # number of days belonging to that period from year.start to year.end: + n.days.period[[pp]] <- length(days.period[[pp]]) + # Number of days belonging to that period in a single year: + period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + + save(lon, lon.max, lat, psl, psl.name, workdir,rean.name,fields.name,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + + +if(PCA=TRUE){ + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. 
Please wait...\n") + +if(fields.name == rean.name){ # Load daily psl data in the reanalysis case: + psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +if(fields.name == ECMWF_S4.name){ # in this case, we can load only the data for 1 month at time (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + #psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1, grid=my.grid, method='bilinear')$mod # not working + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + } + + n.years <- length(years) + + #Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs = NULL, sdates=paste0(1982:2013,'0101'), nmember=15, leadtimemax=216, storefreq='daily', output = 'lonlat', grid=my.grid, nprocs=1) + + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
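+ # (with this interleaved order, the row holding year y of startdate number s is
+ # (s-1)*n.years + (y-year.start+1), which is how psleuFull is filled below; e.g. a
+ # hypothetical helper: row.of <- function(s, y) (s-1)*n.years + (y-year.start+1))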
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + # select period data from psleuFull to use it as input of the cluster analysis: + psl.kmeans <- pslPeriod + dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. 
Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + + ## # convert psl in daily anomalies computed with the 10-days leadtime window including the 9 days AFTER each day: + ## n.leadtimes <- dim(psleuFull)[4] + ## pslPeriodClim10 <- array(NA, c(1, n.members, n.years, n.leadtimes-9,n.pos.lat,n.pos.lon)) + ## for(year in 1:n.years){ + ## for(lead in 1:(n.leadtimes-9)){ + ## pslPeriodClim10[1,,year,lead,,] <- (psleuFull[1,,year,lead,,] + psleuFull[1,,year,lead+1,,] + psleuFull[1,,year,lead+2,,] + psleuFull[1,,year,lead+3,,] + psleuFull[1,,year,lead+4,,] + psleuFull[1,,year,lead+5,,] + psleuFull[1,,year,lead+6,,] + psleuFull[1,,year,lead+7,,] + psleuFull[1,,year,lead+8,,] + psleuFull[1,,year,lead+9,,])/10 + ## } + ## } + + ## pslPeriodClim10mean <- apply(pslPeriodClim10, c(1,4,5,6), mean, na.rm=T) + ## pslPeriodClim10mean2 <- InsertDim(InsertDim(pslPeriodClim10mean,2,n.years),2,n.members) + + ## psleuFull10 <- psleuFull[1,,,1:(n.leadtimes-9),,,drop=F] - pslPeriodClim10mean2 + + ## psleuFull <- psleuFull10 + ## rm(pslPeriodClim2, psleuFull10, pslPeriodClim10, pslPeriodClim10mean2, psleuFull10mean, pslPeriodClim10mean) + ## gc() + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + cat("Preformatting data. Please wait......\n") + + # this function eats much memory!!! 
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + if(fields.name == rean.name){ + my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30) + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_ClusterData.RData")) + } + + if(fields.name == ECMWF_S4.name) { + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=TRUE) + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(num.leadtimes, n.years, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + save(my.cluster, my.cluster.array, my.PCA,tot.variance,n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +if(fields.name == ECMWF_monthly.name){ # Load 6-hourly var data in the monthly forecast case: + sdates <- weekly.seq(forecast.year,mes,day) + vareuFull <-array(NA,c(length(sdates)*(year.end-year.start+1), n.pos.lat, 
n.pos.lon)) + + for (startdate in 1:length(sdates)){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin=leadtime*4-3, leadtimemax=leadtime*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + temp <- apply(var$obs, c(1,3,5,6), mean, na.rm=T) + vareuFull[(1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)),,] <- temp + rm(temp,var) + gc() + } + +} + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",var.name[var.num],"_",year.start,"-",year.end,".RData")) + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 3)) + 
wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length[[p]]+(1:period.length[[p]])] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length[[p]] + wr2y <- wr2y/period.length[[p]] + wr3y <- wr3y/period.length[[p]] + wr4y <- wr4y/period.length[[p]] + + pslPeriod <- psleuFull[days.period[[p]],,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period[[p]]) + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + rm(pslPeriodAnom) + gc() + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # in the reanalysis case, we also measure the impact maps: + + # similar selection of above, but for var instead of psl: + varPeriod <- vareuFull[days.period[[p]],,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period[[p]]) + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim, varPeriodClim2) + gc() + + #PlotEquiMap2(varPeriodClim[,EU], lon[EU], lat, filled.continents = FALSE, cols=my.cols.var, intxlon=10, intylat=10, cex.lab=1.5) # plot the climatology of the period + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth1, varPeriodAnom1) + gc() + + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth2, varPeriodAnom2) + gc() + + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth3, varPeriodAnom3) + gc() + + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period[[p]]],x[(n.days.period[[p]]+1):length(x)])$p.value) + rm(varPeriodAnomBoth4, varPeriodAnom4) + + rm(varPeriodAnom) + gc() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(lon, lon.max, lon.min, lat, lat.max, lat.min, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end,WR.period,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, 
varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,cluster.sequence,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. Please wait...\n") + + load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + gc() + + #for(a in 1:2){ + # for(b in 1:3){ + # for(c in 1:4){ + # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,] + # } + # } + #} + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(lon, lon.max, lat, psl, psl.name, my.period, p, workdir,rean.name,fields.name,var.num,var.name,var.name.full,var.unit,year.start,year.end, years, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, cluster.sequence,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_mapdata.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc.: + if(fields.name == ECMWF_monthly.name) { + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thusdays in the period + } + + wr1y <- wr2y <- wr3y <-wr4y <- c() + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + + cat("Finished!\n") +} # close the for loop on 'p' + + + + + + + + + + + + + + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + + for (y in year.start:year.end){ + var <- Load(var = var.name[var.num], exp = NULL, obs = list(var.data), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() + } +} + +## if(fields.name == ECMWF_S4.name) { +## if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + +## 
+## if(fields.name == ECMWF_S4.name) {
+##   if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+##
+##   my.years <- year.start:year.end
+##   vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(my.years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=216, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod  #, grid=my.grid, method='bilinear')
+## }
+
+
+}
diff --git a/old/weather_regimes_v35.R b/old/weather_regimes_v35.R
new file mode 100644
index 0000000000000000000000000000000000000000..599cb969e897ab889b8f49d25befc2ee32c68895
--- /dev/null
+++ b/old/weather_regimes_v35.R
@@ -0,0 +1,720 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: to compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output is needed by the scripts weather_regimes_impact.R and weather_regimes_maps.R
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification)  # for the function Load()
+library(abind)
+library(reshape2)  # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
+
+SMP=FALSE  # if TRUE, the script is assumed to run on the SMP machine
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"  # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes"  # working dir where output files will be generated
+}
+
+# module load NCO  # load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55  # choose from which of the two reanalyses above to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble"  # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- rean  # put 'rean' to load pressure fields and var from a reanalysis, or 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500"  # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" #"Z500"  # the name to show in the maps
+
+year.start <- 1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2015 #2013 #2010
+
+missing.forecasts=FALSE  # set it to TRUE if there can be missing hindcast files in the forecast psl data: the script will then load only the available years and deal with the NAs
+res <- 0.75  # resolution to interpolate the psl and var data to when loading (e.g. set to 0.75 to use the same resolution as ERA-Interim)
+             # interpolation method is BILINEAR.
+PCA <- TRUE  # set it to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8  # minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+
+lat.weighting=TRUE  # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE  # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280  # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for reanalysis:
+WR.period <- 11 #1:12 #13:16  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for seasonal forecasts:
+startM <- 1  # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 1  # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15  # number of members of the forecast system to load
+n.leadtimes <- 216  # number of leadtimes of the forecast system to load
+
+# Only for monthly forecasts:
+#leadtime <- 1:7 #5:11 #1  # if fields=forecast, set the forecast time (in days) at which to compute the weather regimes.
+#startdate.shift <- 1  # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                       # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1  # if fields=forecast, set the forecast year (usually the year after year.end)
+#mes=1  # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2  # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+# If the script is run with one argument, a reanalysis is assumed and the argument is the month for which to perform the clustering;
+# if it is run with two arguments, a forecast is assumed and the first argument is the start month, the second one the lead month;
+# if it is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+  start.month <- startM
+  WR.period <- start.month
+  if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+  fields <- rean
+  fields.name <- rean.name
+  WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+  fields <- forecast
+  fields.name <- forecast.name
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+}
+
+if(length(script.arg) == 3){  # in this case, we are running the script on the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')  # for the calendar functions
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"  # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+#days.period <- n.days.period <- period.length <- list()
+#for (pp in 1:17){
+#  days.period[[pp]] <- NA
+#  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+#  days.period[[pp]] <- days.period[[pp]][-1]  # remove the NA at the beginning that was introduced only to be able to execute the above command
+#  # number of days belonging to that period from year.start to year.end:
+#  n.days.period[[pp]] <- length(days.period[[pp]])
+#  # number of days belonging to that period in a single year:
+#  period.length[[pp]] <- n.days.in.a.period(pp,1999)  #+ ifelse(period==13 | period==2, 1, 0)  # using year 1999 introduces a small error in the winter season length because of leap years
+#}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen resolution (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
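+
+# Quick check of the grid string built above: with res = 0.75 it becomes 'r480x241'
+# (480 longitudes x 241 latitudes), matching the 'r480x241' grid used elsewhere in
+# this script. Kept inside if(FALSE) so it is not run with the script:
+if(FALSE){
+  paste0('r', 360/0.75, 'x', (180/0.75)+1)  # "r480x241"
+}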
+cat("Loading lat/lon. Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim's), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1]  # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1)  # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon))  # beware: it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat]  # lat values of the chosen area only (smaller than the whole spatial domain loaded with the data)
+#if (lat[2] < lat[1]) lat <- rev(lat)  # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon]  # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min]  # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
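+
+# Hedged toy check of the index selection above, on a small decreasing latitude axis
+# (kept inside if(FALSE) so it is not run with the script):
+if(FALSE){
+  lat.demo <- seq(90, -90, by=-7.5)                      # toy latitude axis, 25 values
+  p.max <- which(81 - round(lat.demo,4) >= 0)[1]         # first latitude <= lat.max: 75
+  p.min <- tail(which(round(lat.demo,4) - 27 >= 0), 1)   # last latitude >= lat.min: 30
+  lat.demo[p.max]; lat.demo[p.min]
+}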
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name){  # load daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+  #  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+
+  # remove leap days (29 February) so that all arrays have the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    yi <- y - year.start + 1  # year index inside the array, kept separate from the year itself
+    if(n.days.in.a.year(y) == 366) psleuFull[,,yi,60:365,,] <- psleuFull366[,,yi,61:366,,]  # take the March-December period, removing the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  pslPeriodClimLoess <- pslPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_S4.name){  # in the forecast case, we load the data of only one start month at a time, with all its lead times (each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware: this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T))  # doesn't work well on the SMP
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
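+      # Note (an assumption; the exact format may vary across NCO versions): the shell
+      # pipeline above relies on 'ncks -m' printing a line containing "psl size" of
+      # roughly the form
+      #   psl size = 216*15*181*360 ...
+      # so that cutting at '=' and then at '*' yields the leadtime, member, latitude
+      # and longitude sizes, in that order.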
+      if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+    }
+
+  } else { years <- year.start:year.end }
+
+  n.years.full <- length(years)
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClimLoess <- pslPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case (the dimensions must be known before allocating the array):
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81  # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100    # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
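+
+# A hedged, self-contained sketch of the LOESS smoothing applied per grid point above,
+# on a synthetic daily climatology (kept inside if(FALSE) so it is not run):
+if(FALSE){
+  set.seed(1)
+  day <- 1:365
+  clim.raw <- 1012 + 8*cos(2*pi*(day - 15)/365) + rnorm(365)  # noisy seasonal cycle (hPa)
+  clim.smooth <- predict(loess(clim.raw ~ day, span=0.35))    # same loess call as above
+  #plot(day, clim.raw); lines(day, clim.smooth, col="red")
+}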
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+#WR.period=1  # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,]  # select only the days in the chosen period (e.g. winter)
+    #pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,]  # alternative: select only the days of period p
+
+    pslPeriod <- psleuFull[1,1,,274:365,,]  # select only the days in the October-December window (days 274-365 of each year); the running_cluster option below extracts November from it
+
+    # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+      psl.kmeans <- pslPeriod * lat.weighted.array
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    gc()
+    cat("Preformatting data for clustering. Please wait...\n")
+    psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+    cat("Preformatting data for clustering. Please wait......\n")
+
+    # this function uses a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+    #gc()
+
+    # select period data from psleuFull to use as input of the cluster analysis:
+    #psl.kmeans <- pslPeriod
+    #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon)  # convert the array into a matrix!
+    #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in the case of S4 we don't need to select anything, because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+
+    # select data only for the start month and lead month to study:
+    cat("Extracting data. Please wait...\n")
+    chosen.month <- start.month + lead.month  # find the month from which we want to select data
+    if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+    i=0
+    for(y in years){
+      i=i+1
+      leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+      leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+      if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1  # remove the 29th of February to have the same number of elements for all years
+      int.leadtimes <- leadtime.min:leadtime.max
+      num.leadtimes <- leadtime.max - leadtime.min + 1  # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+                                                        # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+      if(y == years[1]) {
+        pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+      } else {
+        pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+      }
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+    cat("Preformatting data. Please wait......\n")
+
+    # this function uses a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this way of converting a data.frame into an array uses much less memory than acast, but it is a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)  # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list()  # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- integer(0)  # initialise element p so that c() below can extend it
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
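+  # Hedged, self-contained sketch of the melt()+acast() reshaping used above: it flattens
+  # a (days x lat x lon) array into a (days x gridpoints) matrix suitable for kmeans.
+  # Kept inside if(FALSE) so it is not executed with the script:
+  if(FALSE){
+    demo <- array(1:12, c(2,2,3))                     # 2 "days", 2 lats, 3 lons
+    demo.melted <- melt(demo, varnames=c("Day","Lat","Lon"))
+    unname(acast(demo.melted, Day ~ Lat + Lon))       # 2 x 6 matrix, one row per day
+  }
+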
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16)  # subsample the grid: keep only 1 point in every 16
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50)  # check how many PCs to keep, based on their cumulative explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1)  # keep only the first PCs that together explain at least the chosen fraction of variance (typically 80%)
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30)  # 4 is the number of clusters
+    rm(pslcut)
+  } else {  # 4 is the number of clusters:
+
+    my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+    #if(fields.name == rean.name){
+    #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+    #}
+
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(num.leadtimes, n.years.full, n.members))  # in reverse order compared to the definition of psl.kmeans
+
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+
+}  # close the for loop on 'p'
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
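+
+# A hedged, self-contained sketch of the PCA + k-means step above, on synthetic data
+# (kept inside if(FALSE) so it is not executed with the script):
+if(FALSE){
+  set.seed(1)
+  X <- matrix(rnorm(300*40), 300, 40)                # 300 "days" x 40 "grid points"
+  demo.PCA <- princomp(X, cor=FALSE)
+  expl <- cumsum(demo.PCA$sdev^2 / sum(demo.PCA$sdev^2))
+  npc <- head(which(expl > 0.8), 1)                  # PCs explaining at least 80% of variance
+  demo.cluster <- kmeans(demo.PCA$scores[,1:npc], centers=4, iter.max=100, nstart=30)
+  table(demo.cluster$cluster)                        # sizes of the 4 "regimes"
+}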
+# measure regime anomalies:
+
+for(p in WR.period){  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    running_cluster <- FALSE  # set it to TRUE to keep only the November days inside each October-December window
+    if (running_cluster == TRUE){
+      days.month <- c()
+      for(y in 1:n.years){
+        days.month.new <- 33:62 + 92*(y-1)  # positions of the 30 November days inside each 92-day (Oct-Dec) block
+        days.month <- c(days.month, days.month.new)
+      }
+
+      cluster.vector <- my.cluster[[p]]$cluster[days.month]  # select only the November days of all years
+      cluster.sequence <- cluster.vector
+    }
+
+    if(sequences){  # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster[[p]]$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      }  # close the for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster[[p]]$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    np <- n.days.in.a.period(p,2001)
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.:
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+    if(running_cluster == TRUE) pslmat <- pslmat[days.month,,]  # keep only the November days, to match cluster.sequence
+    gc()
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
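+    # Hedged toy check of the yearly-frequency indexing above, with 2 years of a
+    # 3-day period (np = 3), kept inside if(FALSE) so it is not run:
+    if(FALSE){
+      cs.demo <- c(1,1,2, 4,1,3)  # year1day1..3, year2day1..3
+      sapply(0:1, function(k) sum(cs.demo[k*3 + (1:3)] == 1))  # 2 1
+    }
+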
+    # save all the data necessary to redraw the graphs once we know the right regime:
+    save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  if(fields.name == ECMWF_S4.name){
+    cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster[[p]]$cluster  #as.vector(my.cluster.array[[p]])
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum)
+    wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum)
+    wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum)
+    wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum)
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/(num.leadtimes*n.members)  # here num.leadtimes is the number of days of the chosen month in the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but measuring the maximum frequency among all the members:
+    wr1yMax <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    wr1yMin <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
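+
+    # Hedged sketch of the frequency counts above, on a toy cluster array with
+    # dimensions (leadtime, year, member), kept inside if(FALSE) so it is not run:
+    if(FALSE){
+      arr.demo <- array(sample(1:4, 5*2*3, replace=TRUE), c(5,2,3))
+      apply(arr.demo == 1, 2, sum)                       # regime-1 days per year, members pooled
+      apply(apply(arr.demo == 1, c(2,3), sum), 1, max)   # per year, the member with most regime-1 days
+    }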
+    # in this case, we must use psl.melted instead of psleuFull; both are already in anomalies:
+    # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
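+
+  # For the monthly system the days are interleaved by year (see the comment below); a
+  # hedged toy check of the seq() extraction used there, with n.years = 2 and 3 startdates,
+  # kept inside if(FALSE) so it is not run:
+  if(FALSE){
+    cs.demo <- c(11,21, 12,22, 13,23)     # year1day1, year2day1, year1day2, year2day2, ...
+    cs.demo[seq(1, length(cs.demo), 2)]   # 11 12 13: all the days of year 1
+  }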
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2]  # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    days.period[[p]] <- integer(0)  # initialise element p so that c() below can extend it
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period)  # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+}  # close the for loop on 'p'
+
+
diff --git a/old/weather_regimes_v35.R~ b/old/weather_regimes_v35.R~
new file mode 100644
index 0000000000000000000000000000000000000000..01dbb404f071453ed3ce929174c3ea7fdbe7eeac
--- /dev/null
+++ b/old/weather_regimes_v35.R~
@@ -0,0 +1,701 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: to compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output is needed by the scripts weather_regimes_impact.R and weather_regimes_maps.R
+# Branch: weather_regimes
+
+SMP=FALSE  # if TRUE, the script is assumed to run on the SMP machine
+
+library(s2dverification)  # for the function Load()
+library(abind)
+library(reshape2)  # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"  # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes"  # working dir where output files will be generated
+}
+
+# module load NCO  # load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55  # choose from which of the two reanalyses above to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble"  # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+fields <- rean  # put 'rean' to load pressure fields and var from a reanalysis, or 'forecast' to load them from a forecast system
+
+psl <- "psl" #"g500"  # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" #"Z500"  # the name to show in the maps
+
+year.start <- 1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2015 #2013 #2010
+
+missing.forecasts=FALSE  # set it to TRUE if there can be missing hindcast files in the forecast psl data: the script will then load only the available years and deal with the NAs
+res <- 0.75  # resolution to interpolate the psl and var data to when loading (e.g. set to 0.75 to use the same resolution as ERA-Interim)
+             # interpolation method is BILINEAR.
+PCA <- TRUE  # set it to TRUE to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8  # minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+
+lat.weighting=TRUE  # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences=FALSE  # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280  # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for reanalysis:
+WR.period <- 11 #1:12 #13:16  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for seasonal forecasts:
+startM <- 1  # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 1  # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15  # number of members of the forecast system to load
+n.leadtimes <- 216  # number of leadtimes of the forecast system to load
+
+# Only for monthly forecasts:
+#leadtime <- 1:7 #5:11 #1  # if fields=forecast, set the forecast time (in days) at which to compute the weather regimes.
+#startdate.shift <- 1  # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                       # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1  # if fields=forecast, set the forecast year (usually the year after year.end)
+#mes=1  # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2  # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+# If the script is run with one argument, a reanalysis is assumed and the argument is the month for which to perform the clustering;
+# if it is run with two arguments, a forecast is assumed and the first argument is the start month, the second one the lead month;
+# if it is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+  start.month <- startM
+  WR.period <- start.month
+  if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+  fields <- rean
+  fields.name <- rean.name
+  WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+  fields <- forecast
+  fields.name <- forecast.name
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+}
+
+if(length(script.arg) == 3){  # in this case, we are running the script on the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')  # for the calendar functions
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"  # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+#days.period <- n.days.period <- period.length <- list()
+#for (pp in 1:17){
+#  days.period[[pp]] <- NA
+#  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+#  days.period[[pp]] <- days.period[[pp]][-1]  # remove the NA at the beginning that was introduced only to be able to execute the above command
+#  # number of days belonging to that period from year.start to year.end:
+#  n.days.period[[pp]] <- length(days.period[[pp]])
+#  # number of days belonging to that period in a single year:
+#  period.length[[pp]] <- n.days.in.a.period(pp,1999)  #+ ifelse(period==13 | period==2, 1, 0)  # using year 1999 introduces a small error in the winter season length because of leap years
+#}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen resolution (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+cat("Loading lat/lon. Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim's), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1]  # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1)  # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon))  # beware: it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat]  # lat values of the chosen area only (smaller than the whole spatial domain loaded with the data)
+#if (lat[2] < lat[1]) lat <- rev(lat)  # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon]  # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min]  # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name){  # load daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+  #  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+
+  # remove leap days (29 February) so that all arrays have the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    yi <- y - year.start + 1  # year index inside the array, kept separate from the year itself
+    if(n.days.in.a.year(y) == 366) psleuFull[,,yi,60:365,,] <- psleuFull366[,,yi,61:366,,]  # take the March-December period, removing the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  pslPeriodClimLoess <- pslPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_S4.name){  # in the forecast case, we load the data of only one start month at a time, with all its lead times (each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware: this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T))  # doesn't work well on the SMP
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+      if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+    }
+
+  } else { years <- year.start:year.end }
+
+  n.years.full <- length(years)
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClimLoess <- pslPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case (the dimensions must be known before allocating the array):
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81  # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100    # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+#WR.period=1  # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,]  # select only the days in the chosen period (e.g. winter)
+    pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,]  # select only the days in the chosen period (e.g. winter)
+
+    # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]])
+      psl.kmeans <- pslPeriod * lat.weighted.array
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    gc()
+    cat("Preformatting data for clustering. Please wait...\n")
+    psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+    cat("Preformatting data for clustering. Please wait......\n")
+
+    # this function uses a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+    #gc()
+
+    # select period data from psleuFull to use as input of the cluster analysis:
+    #psl.kmeans <- pslPeriod
+    #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon)  # convert the array into a matrix!
+    #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in the case of S4 we don't need to select anything, because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+
+    # select data only for the start month and lead month to study:
+    cat("Extracting data. Please wait...\n")
+    chosen.month <- start.month + lead.month  # find the month from which we want to select data
+    if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+    i=0
+    for(y in years){
+      i=i+1
+      leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+      leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+      if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1  # remove the 29th of February to have the same number of elements for all years
+      int.leadtimes <- leadtime.min:leadtime.max
+      num.leadtimes <- leadtime.max - leadtime.min + 1  # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+                                                        # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+      if(y == years[1]) {
+        pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+      } else {
+        pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+      }
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+    cat("Preformatting data. Please wait......\n")
+
+    # this function uses a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this way of converting a data.frame into an array uses much less memory than acast, but it is a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)  # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list()  # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- integer(0)  # initialise element p so that c() below can extend it
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point out of 9
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep, based on their cumulative explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep the smallest number of PCs that together explain at least the chosen fraction of variance (typically 80%)
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+
+    my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+    #if(fields.name == rean.name){
+    #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+    #}
+
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+
+} # close for on 'p'
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+    cluster.sequence <- my.cluster[[p]]$cluster
+
+    if(sequences){ # works only for reanalysis data!
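+      # Aside (illustration only, not from the original script): the >=5-day filter
+      # implemented just below can also be sketched with run-length encoding, which
+      # makes the rule easier to see (edge days are handled differently here).
+      # All names are hypothetical toy objects:
+      #   cl   <- c(1,1,1,1,1,2,3,3,3,3,3,4)     # hypothetical daily regime numbers
+      #   r    <- rle(cl)                        # lengths of consecutive runs
+      #   keep <- rep(r$lengths >= 5, r$lengths) # TRUE only inside runs of 5+ days
+      #   cl * keep                              # 1 1 1 1 1 0 3 3 3 3 3 0
+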
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster[[p]]$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster[[p]]$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # indices of the days assigned to each regime (only days inside >=5-day sequences, if 'sequences' is TRUE):
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    np <- n.days.in.a.period(p,2001)
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+    gc()
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    # save all the data necessary to redraw the graphs once we know the right regime:
+    save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  if(fields.name == ECMWF_S4.name){
+    cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]])
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum)
+    wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum)
+    wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum)
+    wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum)
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/(num.leadtimes*n.members) # num.leadtimes is the number of days of the chosen month in the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but to measure the maximum frequency among all the members:
+    wr1yMax <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    # and the minimum frequency among all the members:
+    wr1yMin <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+    # in this case, we must use psl.melted instead of psleuFull. Both are already in anomalies:
+    # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    # get which days inside psleuFull belong to the chosen period:
+    days.period <- period.length <- list()
+    days.chosen <- c()
+    for(startdate in my.startdates.period){
+      days.chosen <- c(days.chosen, (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    days.period[[p]] <- days.chosen
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v35_test.R b/old/weather_regimes_v35_test.R
new file mode 100644
index 0000000000000000000000000000000000000000..9be098dfc7963caf59a99c22ea1c171c2d6167a4
--- /dev/null
+++ b/old/weather_regimes_v35_test.R
@@ -0,0 +1,33 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output are needed by the scripts weather_regimes_impact.R and weather_regimes_maps.R
+# Branch: weather_regimes
+
+SMP=FALSE # if TRUE, the script is assumed to run on the SMP Machine
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
+
+a=0
+for(i in 1:100000000){
+  a=1
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v35_test.R~ b/old/weather_regimes_v35_test.R~
new file mode 100644
index 0000000000000000000000000000000000000000..b53c79f7b7105c8f7526e031997b8046fbc52499
--- /dev/null
+++ b/old/weather_regimes_v35_test.R~
@@ -0,0 +1,689 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# Assumption: its output are needed by the scripts weather_regimes_impact.R and weather_regimes_maps.R
+# Branch: weather_regimes
+
+SMP=TRUE # if TRUE, the script is assumed to run on the SMP Machine
+
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
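+
+# Aside (illustration only, not part of the original file): the melt()/acast()
+# pair from reshape2 is what these scripts use to flatten a multidimensional
+# anomaly array into the days-by-gridpoints matrix expected by kmeans().
+# A toy example of the pattern, with hypothetical names:
+#   toy <- array(rnorm(2*3*4*5), c(2,3,4,5))                # year x day x lat x lon
+#   m   <- melt(toy, varnames=c("Year","Day","Lat","Lon"))  # one row per value
+#   k   <- unname(acast(m, Year + Day ~ Lat + Lon))         # 6 rows (year-days) x 20 columns (grid points)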
+ +if(SMP){ + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated +} else { + source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated +} + +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- forecast # put 'rean' to load pressure fields and var from reanalisis, or put 'forecast' to load them from forecast systems + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2015 #2013 #2010 + +missing.forecasts=FALSE # set it to TRUE if there can be missing hindcasts files in the forecast psl data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +lat.weighting=FALSE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences=FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! 
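+
+# Aside (illustration only, not part of the original file): longitudes above
+# follow the 0-360 convention, so a western bound of -85.5 degrees is written
+# as 274.5. Converting between the two conventions is a one-liner
+# (lon360/lon180 are hypothetical helper names):
+#   lon360 <- function(l) ifelse(l < 0, l + 360, l)   # -85.5  -> 274.5
+#   lon180 <- function(l) ifelse(l > 180, l - 360, l) #  274.5 -> -85.5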
+ +# Only for reanalysis: +WR.period <- 1 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 1 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. 
Please wait...\n") + +if(fields.name == rean.name){ # Load daily psl data of all years in the reanalysis case: + psleuFull <-array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon)) + + #for (y in year.start:year.end){ + # var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + # latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1) + # psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + # rm(var) + # gc() + #} + + psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)$obs + + # remove bisestile days to have all arrays with the same dimensions: + cat("Removing bisestile days. Please wait...\n") + psleuFull[,,,,,] <- psleuFull366[,,,1:365,,] + + for(y in year.start:year.end){ + y <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) psleuFull[,,y,60:365,,] <- psleuFull366[,,y,61:366,,] # take the march to december period removing the 29th of February + } + + rm(psleuFull366) + gc() + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at time (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon 
== 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { years <- year.start:year.end } + + n.years.full <- length(years) + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + + #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, n.days.period[[p]]) + psl.kmeans <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for 
clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + cat("Preformatting data for clustering. Please wait......\n") + + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + cat("Preformatting data. Please wait......\n") + + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. 
Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA[[p]] <- princomp(pslcut,cor=FALSE) + tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster[[p]] <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array[[p]] <- array(my.cluster[[p]]$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster[[p]]$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster[[p]]$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster[[p]]$centers[3,],201),type="l",col="green");lines(SMA(my.cluster[[p]]$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +# measure regime anomalies: + +for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + cluster.sequence <- my.cluster[[p]]$cluster + + if(sequences){ # it works only for rean data!!! 
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster[[p]]$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster[[p]]$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + np <- n.days.in.a.period(p,2001) + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/np + wr2y <- wr2y/np + wr3y <- wr3y/np + wr4y <- wr4y/np + + gc() + pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon)) + gc() + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1, pslwr2, pslwr3, pslwr4, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. 
Please wait...\n") + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array[[p]] == 1, 2, sum) + wr2y <- apply(my.cluster.array[[p]] == 2, 2, sum) + wr3y <- apply(my.cluster.array[[p]] == 3, 2, sum) + wr4y <- apply(my.cluster.array[[p]] == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array[[p]] == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array[[p]] == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array[[p]] == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array[[p]] == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + gc() + + #for(a in 1:2){ + # for(b in 1:3){ + # for(c in 1:4){ + # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,] + # } + # } + #} + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1, pslwr2, pslwr3, pslwr4, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + gc() + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... , yearNday1, year1day2, year2day2, ..., yearNday2, etc.: + if(fields.name == ECMWF_monthly.name) { + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thusdays in the period + } + + wr1y <- wr2y <- wr3y <-wr4y <- c() + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + + cat("Finished!\n") +} # close the for loop on 'p' + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v36.R b/old/weather_regimes_v36.R new file mode 100644 index 0000000000000000000000000000000000000000..5dd91263221988a9c0479815a8877cec83ed3d9f --- /dev/null +++ b/old/weather_regimes_v36.R @@ -0,0 +1,776 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system +# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder. 
+# Assumption: its output are needed by the scripts weather_regimes_impact.R and weather_regimes_maps.R +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(abind) +library(reshape2) # after each undate of s2dverification, it's better to remove and install this package again because sometimes it gets broken! + +SMP=FALSE # if TRUE, the script is assumed to run on the SMP Machine + +if(SMP){ + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated +} else { + source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated +} + +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +fields <- rean # put 'rean' to load pressure fields and var from reanalisis, or put 'forecast' to load them from forecast systems + +psl <- "g500" #"psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "Z500" #"SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2015 #2013 #2010 + +missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcasts files in the forecast psl data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +running.cluster <- TRUE # add to the clustering also the daily SLP data of the two closer months to the month to use +lat.weighting <- TRUE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences <-FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. 
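+
+# Aside (illustration only, not part of the original file): with
+# lat.weighting=TRUE, each anomaly is later multiplied by sqrt(cos(latitude)),
+# so high-latitude grid points, which cover a smaller area, weigh less in the
+# k-means distances. For this domain:
+#   sqrt(cos(27*pi/180))   # ~0.944 at the southern edge
+#   sqrt(cos(81*pi/180))   # ~0.396 at the northern edge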
+ +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! + +# Only for reanalysis: +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 1 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. 
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name){ # Load daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+  #  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+
+  # remove leap days to have all arrays with the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    yy <- y - year.start + 1 # leap years must be checked on the year itself, not on its index
+    if(n.days.in.a.year(y) == 366) psleuFull[,,yy,60:365,,] <- psleuFull366[,,yy,61:366,,] # take the March-to-December period, removing the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  pslPeriodClimLoess <- pslPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+}
+
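+# A minimal, synthetic illustration of the LOESS smoothing applied above to the daily
+# climatology (toy data, wrapped in if(FALSE) so it is never executed):
+if(FALSE){
+  day <- 1:365
+  raw.clim <- 1010 + 8*cos(2*pi*(day-15)/365) + rnorm(365)  # noisy annual cycle, in hPa
+  smooth.clim <- predict(loess(raw.clim ~ day, span=0.35))  # same span as above
+  # daily anomalies are then the raw daily values minus this smoothed climatology
+}
+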
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load the data for only 1 start month at a time and all the lead times (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+      if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+    }
+
+  } else { years <- year.start:year.end }
+
+  n.years.full <- length(years)
+
+  psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+  pslPeriodClimLoess <- pslPeriodClim
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(pslPeriodClim)
+  gc()
+
+  pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+}
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  # the time dimensions must be defined before allocating the array:
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pa to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c() # do not overwrite my.cluster here: it was just initialized above
+
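+# A toy illustration (wrapped in if(FALSE), never executed) of the optional
+# sqrt(cos(lat)) latitude weighting applied to the anomalies inside the loop below:
+if(FALSE){
+  toy.lat <- c(30, 50, 70)
+  cos(toy.lat*pi/180)^0.5  # weights shrink toward the pole: ~0.93, 0.80, 0.58
+}
+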
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g. winter)
+    if(running.cluster == TRUE && p < 13) {
+      pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select only days in the period of 3 months centered on the target month p
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (e.g. winter)
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    gc()
+    cat("Preformatting data for clustering. Please wait...\n")
+    psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+    cat("Preformatting data for clustering. Please wait......\n")
+
+    # this function eats a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+    #gc()
+
+    # select period data from psleuFull to use it as input of the cluster analysis:
+    #psl.kmeans <- pslPeriod
+    #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in the case of S4 we don't need to select a period, because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+
+    # select data only for the startmonth and leadmonth to study:
+    cat("Extracting data. Please wait...\n")
+    chosen.month <- start.month + lead.month # find which month to select data from
+    if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+    i <- 0
+    for(y in years){
+      i <- i + 1
+      leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+      leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+      if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+      int.leadtimes <- leadtime.min:leadtime.max
+      num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+      # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+      if(y == years[1]) {
+        pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+      } else {
+        pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+      }
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array ### TODO: adapt the dimensions of lat.weighted.array to the S4 case (member and lead-day dimensions) before using it!
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+    cat("Preformatting data. Please wait......\n")
+
+    # this function eats a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
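+    # A tiny self-contained illustration (if(FALSE), never executed) of the melt + acast
+    # reshaping used above: a 3-day x 2x2-grid array becomes a 3 x 4 matrix, one row per
+    # day and one column per grid point, which is the input format kmeans() expects:
+    if(FALSE){
+      toy <- array(1:12, c(3,2,2)) # Day x Lat x Lon
+      toy.melted <- melt(toy, varnames=c("Day","Lat","Lon"))
+      toy.kmeans <- unname(acast(toy.melted, Day ~ Lat + Lon)) # 3 rows (days) x 4 columns (grid points)
+    }
+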
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this way of converting a data.frame into an array uses much less memory than acast, but it is a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- integer(0) # initialize the element before appending to it
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 grid point out of 16
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA[[p]] <- princomp(pslcut,cor=FALSE)
+    tot.variance[[p]] <- head(cumsum(my.PCA[[p]]$sdev^2/sum(my.PCA[[p]]$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+    n.pcs[p] <- head(as.numeric(which(tot.variance[[p]] > variance.explained)),1) # keep only the PCs that together explain at least the chosen fraction of variance (typically 80%)
+    my.cluster[[p]] <- kmeans(my.PCA[[p]]$scores[,1:n.pcs[p]], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+    #if(fields.name == rean.name){
+    #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+    #}
+
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+
+#} # close for on 'p'
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
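+# A small self-contained sketch (if(FALSE), never executed) of how the PCA branch above
+# picks the number of PCs from the cumulative explained variance before running kmeans:
+if(FALSE){
+  set.seed(1)
+  toy.data <- matrix(rnorm(100*10), 100, 10)
+  toy.PCA <- princomp(toy.data, cor=FALSE)
+  toy.variance <- cumsum(toy.PCA$sdev^2/sum(toy.PCA$sdev^2)) # cumulative explained variance
+  toy.n.pcs <- head(which(toy.variance > 0.8), 1)            # first number of PCs above the 80% threshold
+  toy.cluster <- kmeans(toy.PCA$scores[,1:toy.n.pcs], centers=4, iter.max=100, nstart=30)
+}
+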
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if (running.cluster == TRUE && p < 13){
+      n.days.period <- length(pos.month.extended(2001,p))
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+
+    } else {
+
+      cluster.sequence <- my.cluster$cluster
+
+    }
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster$cluster)
+
+      n.days.sequ <- length(my.cluster$cluster) # length of the full cluster series
+      sequ <- rep(FALSE, n.days.sequ)
+      for(i in 5:(n.days.sequ-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
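+    # An equivalent, more compact way to flag days inside runs of at least 5 equal
+    # regimes, using rle(); shown only as a cross-check of the pairwise-difference
+    # filter above (unlike the loop, it also keeps long runs at the series edges):
+    if(FALSE){
+      runs <- rle(as.vector(my.cluster$cluster))
+      sequ.rle <- rep(runs$lengths >= 5, runs$lengths)       # TRUE for days inside runs of 5+ days
+      cluster.sequence.rle <- my.cluster$cluster * sequ.rle  # 0 marks days outside long sequences
+    }
+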
+    # Select the days belonging to each regime (if sequences=TRUE, only the days belonging to a sequence of at least 5 days of that regime):
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    np <- n.days.in.a.period(p,2001)
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc,
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert the counts to relative frequencies (fractions of the period length):
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster == TRUE && p < 13){
+      pslmat <- pslmat[days.month.full,,] # keep only the days of the target month p (days.month.full exists only in this case)
+    }
+    gc()
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    # just to plot the ERA-Interim running cluster monthly maps:
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+    if(psl == "g500"){
+      my.brks <- c(-300,seq(-200,200,10),300) # breaks for the mean anomaly of a WR
+      my.brks2 <- c(-300,seq(-200,200,10),300) # breaks for the mean anomaly of a WR, for the contour lines
+      values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+    }
+
+    if(psl == "psl"){
+      my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # breaks for the mean anomaly of a WR (hPa)
+      my.brks2 <- my.brks #c(seq(-20,20,2)) # breaks for the mean anomaly of a WR, for the contour lines
+      values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+    }
+    my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+    my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+    map1 <- pslwr1mean
+    map2 <- pslwr2mean
+    map3 <- pslwr3mean
+    map4 <- pslwr4mean
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=3000,height=3700)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=3000,height=3700)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=3000,height=3700)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=3000,height=3700)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
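+  # Sketch of how the file saved above can be reloaded later (e.g. by a plotting
+  # script) to redraw the maps without recomputing the clusters; if(FALSE) keeps it
+  # from running here:
+  if(FALSE){
+    load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+    # pslwr1mean...pslwr4mean, wr1y...wr4y, etc. are now available again
+  }
+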
+  if(fields.name == ECMWF_S4.name){
+    cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array == 1, 2, sum)
+    wr2y <- apply(my.cluster.array == 2, 2, sum)
+    wr3y <- apply(my.cluster.array == 3, 2, sum)
+    wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days of one month of the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but measuring the maximum frequency across all the members:
+    wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    # and the minimum frequency across all the members:
+    wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
+
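+    # A toy check (if(FALSE), never executed) of the nested apply() logic above:
+    # count the regime-1 days per year and member, then take the per-year maximum:
+    if(FALSE){
+      toy.arr <- array(sample(1:4, 30*3*2, replace=TRUE), c(30,3,2)) # days x years x members
+      toy.counts <- apply(toy.arr == 1, c(2,3), sum) # regime-1 days for each year and member
+      toy.max <- apply(toy.counts, 1, max)           # per-year maximum across the members
+    }
+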
+    # in this case, we must use psl.melted instead of psleuFull; both are already anomalies:
+    # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
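+  # Toy illustration (if(FALSE), never executed) of the year-major ordering of the
+  # monthly system and of the stride indexing used below: with n.years=3, positions
+  # 2, 5, 8, ... all belong to the 2nd year, so seq() with step n.years extracts one year:
+  if(FALSE){
+    toy.n.years <- 3
+    toy.series <- rep(1:toy.n.years, times=4) # 4 startdates x 3 years, year index as value
+    seq(2, length(toy.series), toy.n.years)   # positions of year 2: 2, 5, 8, 11
+  }
+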
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name) {
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    days.period[[p]] <- integer(0) # initialize the element before appending to it
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){ # loop over the years, extracting the days of each year with a stride of n.years
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v37.R b/old/weather_regimes_v37.R
new file mode 100644
index 0000000000000000000000000000000000000000..b854a3abf3c87fd797fb7db995105d1a6ec7c629
--- /dev/null
+++ b/old/weather_regimes_v37.R
@@ -0,0 +1,819 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package because sometimes it gets broken!
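+
+# NOTE (editor's sketch, not part of the original pipeline): melt() and acast() from reshape2
+# are used below to flatten the [Year, Day, Lat, Lon] anomaly array into the (days x gridpoints)
+# matrix that kmeans() expects, one row per day. A minimal self-contained example with toy
+# dimensions and hypothetical names ('arr', 'm', 'mat', 'km'):
+# arr <- array(rnorm(2*3*4*5), c(2,3,4,5))                # toy [Year,Day,Lat,Lon] array
+# m   <- melt(arr, varnames=c("Year","Day","Lat","Lon"))  # long data.frame, one row per value
+# mat <- unname(acast(m, Year + Day ~ Lat + Lon))         # 6 x 20 matrix: days in rows, grid points in columns
+# km  <- kmeans(mat, centers=4, iter.max=100, nstart=30)  # each day is assigned to one of 4 regimes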
+ +SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine + +if(SMP){ + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated +} else { + source('/home/Earth/ncortesi/scripts/Rfunctions.R') + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated +} + +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +# choose a forecast system: +forecast <- ECMWF_S4 + +# put 'rean' to load pressure fields and var from reanalisis, or put 'forecast' to load them from forecast systems: +fields <- forecast #rean #forecast + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2015 #2016 #2013 #2015 + +missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcasts files in the forecast psl data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +LOESS <- TRUE # To apply the LOESS filter or not to the climatology +running.cluster <- TRUE # add to the clustering also the daily SLP data of the two closer months to the month to use (only for monthly analysis) +lat.weighting <- FALSE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences <-FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. 
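+# NOTE (editor's sketch, not part of the original pipeline): the 'sequences' option above keeps
+# a day only if it falls inside a run of at least 5 consecutive days assigned to the same regime.
+# The same idea can be written compactly with rle(); 'keep.sequences' and 'clus' are hypothetical
+# names. Unlike the loop used later in this script, run lengths are evaluated exactly at the
+# edges of the series:
+# keep.sequences <- function(clus, min.len=5){
+#   r <- rle(clus)                                 # runs of identical cluster numbers
+#   keep <- rep(r$lengths >= min.len, r$lengths)   # TRUE only inside runs of >= min.len days
+#   clus * keep                                    # days outside such runs are set to 0
+# }
+# keep.sequences(c(1,1,1,1,1,2,3,3,3,3,3,3), 5)    # -> 1 1 1 1 1 0 3 3 3 3 3 3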
+ +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! + +# Only for reanalysis: +WR.period <- 11 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. 
Please wait...\n")
+
+if(fields.name == rean.name){ # Load daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+  #  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+
+  # remove leap days to have all arrays with the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March to December period removing the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- pslPeriodClim
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else {
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean
+
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load the data of only one start month at a time and all the lead times (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks
-m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE && p < 13) { + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + } else { + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all them in the third dimension of the array, one month at time: + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=c(paste0(years, start.month1.char,'01'), paste0(years, start.month2.char,'01'), paste0(years, start.month3.char,'01')), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } else { + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name=forecasts.name + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+ n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + + #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + if(running.cluster == TRUE && p < 13) { + pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the period of 3 months centered on the target month p + } else { + pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (i.e: winter) + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + cat("Preformatting data for clustering. Please wait......\n") + + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. 
of elements for all years + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!! + rm(lat.weighted.array) + gc() + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + cat("Preformatting data. Please wait......\n") + + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no bisestile day left!) + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA <- princomp(pslcut,cor=FALSE) + tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
variance + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + gc() + + #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +#} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +# measure regime anomalies: + +#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + + if(running.cluster == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p + n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months + + days.month <- days.month.new <- days.month.full <- c() + days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + n.days.period*(y-1) + days.month.full <- c(days.month.full, days.month.new) + #print(days.month.new) + #print(days.month) + } + + cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p + + } else { + cluster.sequence <- my.cluster$cluster + } + + if(sequences){ # it works only for rean data!!! 
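+ # How the checks below work: wrdiff[i] != 0 marks a regime change between days i and i+1;
+ # day i is discarded whenever two regime changes occur within the 4 positions before and
+ # after it, i.e. (approximately) whenever it cannot sit inside a run of 5 equal cluster numbers.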
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + np <- n.days.in.a.period(p,2001) + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/np + wr2y <- wr2y/np + wr3y <- wr3y/np + wr4y <- wr4y/np + + gc() + + pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon)) + + if(running.cluster == TRUE){ + pslmat.new <- pslmat + pslmat <- pslmat.new[days.month.full,,] + gc() + } + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day to save for the meaure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, 
pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + # just to save the plots of the ERA-Interim monthly regime anomalies with the running cluster now instead than loading the next script: + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + map1 <- pslwr1mean + map2 <- pslwr2mean + map3 <- pslwr3mean + map4 <- pslwr4mean + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + + + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. 
Please wait...\n") + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array == 1, 2, sum) + wr2y <- apply(my.cluster.array == 2, 2, sum) + wr3y <- apply(my.cluster.array == 3, 2, sum) + wr4y <- apply(my.cluster.array == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + gc() + + #for(a in 1:2){ + # for(b in 1:3){ + # for(c in 1:4){ + # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,] + # } + # } + #} + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day to save for the meaure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v37.R~ b/old/weather_regimes_v37.R~
new file mode 100644
index 0000000000000000000000000000000000000000..5c0e55696eb416390c14000264fa2fc80f73f3d4
--- /dev/null
+++ b/old/weather_regimes_v37.R~
@@ -0,0 +1,803 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package because sometimes it gets broken!
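+
+# NOTE (editor's sketch, not part of the original pipeline): for the ECMWF monthly system the
+# daily series is interleaved across years (year1day1, year2day1, ..., yearNday1, year1day2, ...),
+# so the days of a single year are picked with a stride of n.years, as in the frequency loops
+# above. Toy example with hypothetical names ('n.yrs', 'clus'):
+# n.yrs <- 3
+# clus  <- c(1,2,3, 1,1,2, 3,3,3, 2,1,2)     # 4 "days" x 3 years, interleaved by day
+# clus[seq(2, length(clus), n.yrs)]          # all days of year 2 -> 2 1 3 1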
+ +SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine + +if(SMP){ + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated +} else { + source('/home/Earth/ncortesi/scripts/Rfunctions.R') + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated +} + +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +rean <- ERAint #JRA55 #ERAint # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +# choose a forecast system: +forecast <- ECMWF_S4 + +# put 'rean' to load pressure fields and var from reanalisis, or put 'forecast' to load them from forecast systems: +fields <- rean #forecast + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1979 #1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2016 #2013 #2015 + +missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcasts files in the forecast psl data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +LOESS <- TRUE # To apply the LOESS filter or not to the climatology +running.cluster <- FALSE # add to the clustering also the daily SLP data of the two closer months to the month to use (only for reanalysis) +lat.weighting <- FALSE # set it to true to weight psl data on latitude before applying the cluster analysis +sequences <-FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. 
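+# NOTE (editor's sketch, not part of the original pipeline): with LOESS=TRUE above, the raw
+# 365-day climatology is replaced by a smooth annual cycle fitted independently at each grid
+# point. A minimal sketch of that fit; 'smooth.clim' and 'clim' are hypothetical names:
+# smooth.clim <- function(clim, span=0.35){        # clim: vector of 365 daily means
+#   d <- data.frame(ens.mean=clim, day=seq_along(clim))
+#   predict(loess(ens.mean ~ day, d, span=span))   # smoothed annual cycle
+# }
+# # in the script below this corresponds to fitting pslPeriodClim[1,1,,i,j] for each grid point (i,j)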
+ +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! + +# Only for reanalysis: +WR.period <- 11 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name)) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. 
Please wait...\n")
+
+if(fields.name == rean.name){ # Load daily psl data of all years in the reanalysis case:
+  psleuFull <-array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+  #  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+
+  # remove leap days (the 29th of February) so that all years have the same number of days:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  # convert psl in daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- pslPeriodClim
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else {
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean
+
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load the data for only 1 start month at a time, with all its lead times (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP machine
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
-m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } else { + + pslPeriodClim2 <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name=forecasts.name + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # allocated only after n.sdates and n.leadtimes are known
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+    if(running.cluster == TRUE && p < 13) {
+      pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the 3-month period centered on the target month p
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (i.e: winter)
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    gc()
+    cat("Preformatting data for clustering. Please wait...\n")
+    psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+    cat("Preformatting data for clustering. Please wait......\n")
+
+    # beware: acast() is very memory-hungry!
+    psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+    #gc()
+
+    # select period data from psleuFull to use it as input of the cluster analysis:
+    #psl.kmeans <- pslPeriod
+    #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix!
+    #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in case of S4 we don't need to select a period, because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+
+    # select data only for the startmonth and leadmonth to study:
+    cat("Extracting data. Please wait...\n")
+    chosen.month <- start.month + lead.month # find from which month we want to select data
+    if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+    i <- 0
+    for(y in years){
+      i <- i + 1
+      leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+      leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+      #leadtime.max <- 61
+
+      if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 # remove the 29th of February to have the same n. of elements for all years
+      int.leadtimes <- leadtime.min:leadtime.max
+      num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+                                                       # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+      if(y == years[1]) {
+        pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+      } else {
+        pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+      }
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!!
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+    cat("Preformatting data. Please wait......\n")
+
+    # beware: acast() is very memory-hungry!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this way of converting a data.frame into an array is much less memory-hungry than acast(), but a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- integer(0) # initialise element p, so we can append to it below
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # subsample: keep only 1 grid point out of every 16
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA <- princomp(pslcut,cor=FALSE)
+    tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep, based on their cumulative explained 
variance + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + gc() + + #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +#} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +# measure regime anomalies: + +#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + + if(running.cluster == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p + n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months + + days.month <- days.month.new <- days.month.full <- c() + days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + n.days.period*(y-1) + days.month.full <- c(days.month.full, days.month.new) + #print(days.month.new) + #print(days.month) + } + + cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p + + } else { + cluster.sequence <- my.cluster$cluster + } + + if(sequences){ # it works only for rean data!!! 
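+    # Idea of the filter below: wrdiff[i] != 0 marks a regime change between day i and day i+1,
+    # so day i is kept only when no pair of regime changes encloses it within a window shorter
+    # than 5 days, i.e. only when it lies inside a run of at least 5 consecutive days assigned
+    # to the same regime.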
+    # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+    # warning: the first 4 and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+    wrdiff <- diff(my.cluster$cluster)
+
+    sequ <- rep(FALSE, n.days.period[[p]])
+    for(i in 5:(n.days.period[[p]]-5)){
+      sequ[i] <- TRUE
+      # day i is discarded if there is a regime change at most k days before it and at most m days
+      # after it, with k+m <= 4 (equivalent to the full list of pairwise checks, written compactly):
+      for(k in 0:4){
+        for(m in 0:(4-k)){
+          if((k+m > 0) && wrdiff[i-k] != 0 && wrdiff[i+m] != 0) sequ[i] <- FALSE
+        }
+      }
+    } # close for loop on i
+
+    # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+    cluster.sequence <- my.cluster$cluster * sequ
+    rm(wrdiff, sequ)
+  }
+
+  # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence == 1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  np <- n.days.in.a.period(p,2001)
+
+  for(y in year.start:year.end){
+    # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+    wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+    wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+    wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+    wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+  }
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/np
+  wr2y <- wr2y/np
+  wr3y <- wr3y/np
+  wr4y <- wr4y/np
+
+  gc()
+
+  pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+  if(running.cluster == TRUE){
+    pslmat.new <- pslmat
+    pslmat <- pslmat.new[days.month.full,,]
+    gc()
+  }
+
+  pslwr1 <- pslmat[wr1,,, drop=F]
+  pslwr2 <- pslmat[wr2,,, drop=F]
+  pslwr3 <- pslmat[wr3,,, drop=F]
+  pslwr4 <- pslmat[wr4,,, drop=F]
+
+  pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+  pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+  pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+  pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+  # spatial mean for each day, saved for the later measure of the FairRPSS:
+  pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+  pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+  pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+  pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+  # save all the data necessary to redraw the graphs when we know the right regime:
+  save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, 
pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + # just to save the plots of the ERA-Interim monthly regime anomalies with the running cluster now instead than loading the next script: + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + map1 <- pslwr1mean + map2 <- pslwr2mean + map3 <- pslwr3mean + map4 <- pslwr4mean + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + + + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. 
Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array == 1, 2, sum)
+    wr2y <- apply(my.cluster.array == 2, 2, sum)
+    wr3y <- apply(my.cluster.array == 3, 2, sum)
+    wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days of one month of the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but to measure the maximum frequency across all the members:
+    wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+    # in this case, we must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + gc() + + #for(a in 1:2){ + # for(b in 1:3){ + # for(c in 1:4){ + # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,] + # } + # } + #} + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day to save for the meaure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    days.period[[p]] <- integer(0) # initialise element p, so we can append to it below
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v38.R b/old/weather_regimes_v38.R
new file mode 100644
index 0000000000000000000000000000000000000000..6f23a697c6aa3c756111e4ea96fa5015b153abf1
--- /dev/null
+++ b/old/weather_regimes_v38.R
@@ -0,0 +1,923 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its output is needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
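+
+# Example invocations (a sketch based on the argument parsing further below; the script file name is assumed):
+#   Rscript weather_regimes_v38.R          # no arguments: use the WR.period, startM and leadM values set below
+#   Rscript weather_regimes_v38.R 9        # one argument: reanalysis mode, clustering the month of September
+#   Rscript weather_regimes_v38.R 9 2      # two arguments: forecast mode, start month 9 and lead month 2
+#   Rscript weather_regimes_v38.R 9 2 1    # three arguments: as above, but overriding the paths for the SMP machine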
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP machine
+
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for the var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose from which of the two reanalyses above to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# put 'rean' to load the pressure fields and var from a reanalysis, or 'forecast' to load them from a forecast system:
+fields <- forecast #rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2015 #2016 #2013 #2015
+
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and NAs are handled
+res <- 0.75 # set the resolution to interpolate the psl and var data to when loading it (i.e: set to 0.75 to use the same res as ERA-Interim)
+            # interpolation method is BILINEAR.
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+running.cluster <- TRUE # add to the clustering also the daily SLP data of the two months adjacent to the target month (only for monthly analysis)
+lat.weighting <- FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences <- FALSE # set it to TRUE to select only days belonging to sequences of at least 5 consecutive days assigned to the same regime.
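+
+# A minimal sketch of what the LOESS flag above does to the daily climatology (a hypothetical,
+# self-contained example, not executed by this script): the raw day-of-year climatology is
+# replaced by a loess() fit with span=0.35, giving a smooth seasonal cycle:
+#   clim.raw <- 10*sin(2*pi*(1:365)/365) + rnorm(365)                         # fake daily climatology
+#   fit <- loess(clim ~ day, data.frame(clim=clim.raw, day=1:365), span=0.35)
+#   clim.smooth <- predict(fit)                                               # smoothed cycle, used instead of clim.raw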
+
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# for both reanalysis and forecasts:
+WR.period <- 9 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for seasonal forecasts:
+startM <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+# Only for monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) at which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month for which to perform the clustering;
+# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth.
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. 
Please wait...\n") + +if(fields.name == rean.name){ # Load daily psl data of all years in the reanalysis case: + psleuFull <-array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon)) + + #for (y in year.start:year.end){ + # var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + # latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1) + # psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + # rm(var) + # gc() + #} + + psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs + + # remove bisestile days to have all arrays with the same dimensions: + cat("Removing bisestile days. Please wait...\n") + psleuFull[,,,,,] <- psleuFull366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(psleuFull366) + gc() + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years) + + } else { + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at time and all the lead times (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks 
-m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE) { + + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all them in the third dimension of the array, one month at time: + psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. 
Please wait...\n") + + if(running.cluster == TRUE) { + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess1 <- pslPeriodClim1 + pslPeriodClimLoess2 <- pslPeriodClim2 + pslPeriodClimLoess3 <- pslPeriodClim3 + + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes) + my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35) + pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1) + rm(my.data1, my.loess1) + gc() + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes) + my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35) + pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2) + rm(my.data2, my.loess2) + gc() + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes) + my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35) + pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3) + rm(my.data3, my.loess3) + gc() + } + } + + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3) + gc() + + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) # add the three months in the year dimension + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } # close if on running cluster + + } else { #in case of no LOESS: + + if(running.cluster == TRUE) { + # in this case, the climatology is measured FOR EACH MONTH INDIPENDENTLY, instead of using a seasonal value: + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)) + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else {# in case of no running cluster: + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + } + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClimDos + rm(pslPeriodClimDos) + gc() + +} # close if on fields.name=forecasts.name + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the 
forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.RData") + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + + #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + if(running.cluster == TRUE) { + pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the period of 3 months centered on the target month p + } else { + pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (i.e: winter) + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Preformatting data for clustering. Please wait......\n") + + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! 
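+  # Worked example of the day-window selection performed below (assuming the calendar helpers of
+  # Rfunctions.R behave as their names suggest): with start.month = 9 and lead.month = 2 we get
+  # chosen.month = 11; then leadtime.min = 1 + days(Sep-Nov) - days(Nov) = 1 + 91 - 30 = 62 and
+  # leadtime.max = 62 + 30 - 1 = 91, i.e. November occupies leadtimes 62:91 of the loaded file.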
+ if(fields.name == ECMWF_S4.name){ + if(running.cluster == TRUE && p < 13) { + # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-months running period + # generalizing the command below (which at present only work for the month of January and lead time 0): + + # Select ASO and lead time 0, for our case study: + pslPeriod <- psleuFull[,,,1:30,,, drop=FALSE] + + } else { + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # remove the 29th of February to have the same n. of elements for all years + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!! + rm(lat.weighted.array) + gc() + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + + save(psl.melted,file=paste0(workdir,"/psl_melted.RData")) + + cat("Preformatting data. Please wait......\n") + # This function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData")) + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + save(my.cluster,file=paste0(workdir,"/my_cluster.RData")) + + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no bisestile day left!) 
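+    # Index mapping used below: the rows of psleuFull are ordered startdate-major, so startdate s
+    # occupies rows (1+(s-1)*n.years):(s*n.years); e.g. with n.years = 35, the 3rd startdate
+    # fills rows 71:105.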
+ + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA <- princomp(pslcut,cor=FALSE) + tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + gc() + + #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +#} # close for on 'p' + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +# measure regime anomalies: + +#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + + if(running.cluster == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p + n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months + + days.month <- days.month.new <- days.month.full <- c() + days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + 
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+
+    } else {
+      cluster.sequence <- my.cluster$cluster
+    }
+
+    if(sequences){ # it only works for reanalysis data!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    np <- n.days.in.a.period(p,2001)
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster == TRUE){
+      pslmat.new <- pslmat
+      pslmat <- pslmat.new[days.month.full,,]
+      gc()
+    }
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    # just to save the plots of the ERA-Interim monthly regime anomalies with the running cluster now, instead of loading the next script:
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+    if(psl == "g500"){
+      my.brks <- c(-300,seq(-200,200,10),300) # breaks for the mean anomaly of a WR
+      my.brks2 <- c(-300,seq(-200,200,10),300) # breaks for the contour lines of the mean anomaly of a WR
+      values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+    }
+
+    if(psl == "psl"){
+      my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # breaks for the mean anomaly of a WR
+      my.brks2 <- my.brks #c(seq(-20,20,2)) # breaks for the contour lines of the mean anomaly of a WR
+      values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+    }
+
+    my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+    my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+    map1 <- pslwr1mean
+    map2 <- pslwr2mean
+    map3 <- pslwr3mean
+    map4 <- pslwr4mean
+
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+
+
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  if(fields.name == ECMWF_S4.name){
Please wait...\n") + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array == 1, 2, sum) + wr2y <- apply(my.cluster.array == 2, 2, sum) + wr3y <- apply(my.cluster.array == 3, 2, sum) + wr4y <- apply(my.cluster.array == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
+    # in this case, we must use psl.melted instead of psleuFull; both are already in anomalies:
+    # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v38.R~ b/old/weather_regimes_v38.R~
new file mode 100644
index 0000000000000000000000000000000000000000..2bb468d083986aa58f76a7605f22e9afde10a0e0
--- /dev/null
+++ b/old/weather_regimes_v38.R~
@@ -0,0 +1,921 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken!
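+# For example, a one-off reinstall from an R console (standard utils functions, not run by this script):
+# remove.packages("reshape2"); install.packages("reshape2")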
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine
+
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+rean <- ERAint #JRA55 #ERAint # choose one of the two reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# put 'rean' to load pressure fields and var from a reanalysis, or put 'forecast' to load them from forecast systems:
+fields <- forecast #rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2015 #2016 #2013 #2015
+
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and the NAs are dealt with
+res <- 0.75 # set the resolution to which to interpolate the psl and var data when loading them (i.e: set to 0.75 to use the same res of ERA-Interim)
+            # interpolation method is BILINEAR.
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+running.cluster <- TRUE # add to the clustering also the daily SLP data of the two months adjacent to the target month (only for monthly analysis)
+lat.weighting <- FALSE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
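+# A minimal, self-contained sketch (toy data, not part of this script) of the LOESS smoothing of the daily
+# climatology enabled by the LOESS flag above, using the same span as in the code further below (0.35):
+# day <- 1:365
+# clim.raw <- sin(2*pi*day/365) + rnorm(365, sd=0.1) # hypothetical noisy daily climatology
+# clim.smooth <- predict(loess(clim ~ day, data.frame(clim=clim.raw, day=day), span=0.35))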
+
+PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for reanalysis:
+WR.period <- 1 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+startM <- 9 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, JRA55.name))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month for which we want to perform the clustering;
+# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth.
+# in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+  start.month <- startM
+  #WR.period <- start.month
+  if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+  fields <- rean
+  fields.name <- rean.name
+  WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+  fields <- forecast
+  fields.name <- forecast.name
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+#days.period <- n.days.period <- period.length <- list()
+#for (pp in 1:17){
+#  days.period[[pp]] <- NA
+#  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+#  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+#  # number of days belonging to that period from year.start to year.end:
+#  n.days.period[[pp]] <- length(days.period[[pp]])
+#  # Number of days belonging to that period in a single year:
+#  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+#}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!)
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanlaysis grid, if necessary): +cat("Loading data. 
Please wait...\n") + +if(fields.name == rean.name){ # Load daily psl data of all years in the reanalysis case: + psleuFull <-array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon)) + + #for (y in year.start:year.end){ + # var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + # latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1) + # psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + # rm(var) + # gc() + #} + + psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs + + # remove bisestile days to have all arrays with the same dimensions: + cat("Removing bisestile days. Please wait...\n") + psleuFull[,,,,,] <- psleuFull366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(psleuFull366) + gc() + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years) + + } else { + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at time and all the lead times (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks 
-m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE) { + + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all them in the third dimension of the array, one month at time: + psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. 
Please wait...\n") + + if(running.cluster == TRUE) { + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess1 <- pslPeriodClim1 + pslPeriodClimLoess2 <- pslPeriodClim2 + pslPeriodClimLoess3 <- pslPeriodClim3 + + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes) + my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35) + pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1) + rm(my.data1, my.loess1) + gc() + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes) + my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35) + pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2) + rm(my.data2, my.loess2) + gc() + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes) + my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35) + pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3) + rm(my.data3, my.loess3) + gc() + } + } + + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3) + gc() + + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } # close if on running cluster + + } else { #in case of no LOESS: + + if(running.cluster == TRUE) { + # in this case, the climatology is measured FOR EACH MONTH INDIPENDENTLY, instead of using a seasonal value: + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)) + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else {# in case of no running cluster: + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + } + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClimDos + rm(pslPeriodClimDos) + gc() + +} # close if on fields.name=forecasts.name + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, 
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pascal to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.RData")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+    if(running.cluster == TRUE) {
+      pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the period of 3 months centered on the target month p
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (i.e: winter)
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    gc()
+    cat("Preformatting data for clustering. Please wait...\n")
+    psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+    cat("Preformatting data for clustering. Please wait......\n")
+
+    # this function needs a lot of memory!
+    psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+    #gc()
+
+    # select period data from psleuFull to use it as input of the cluster analysis:
+    #psl.kmeans <- pslPeriod
+    #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in the case of S4 we don't need to select anything, because we already loaded only 1 month of data!
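+  # Minimal sketch (toy dimensions, hypothetical, not run by this script) of the melt/acast preformatting
+  # used above: an array [year, day, lat, lon] becomes a (years*days) x (gridpoints) matrix, with one row
+  # per day and one column per grid point, which is the shape kmeans() expects:
+  # x <- array(rnorm(2*3*4*5), c(2,3,4,5)) # 2 years, 3 days, 4 lats, 5 lons
+  # m <- melt(x, varnames=c("Year","Day","Lat","Lon"))
+  # k <- unname(acast(m, Year + Day ~ Lat + Lon)) # dim(k) is c(6,20)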
+  if(fields.name == ECMWF_S4.name){
+    if(running.cluster == TRUE && p < 13) {
+      # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting the 3-month running period automatically,
+      # generalizing the command below (which at present only works for the month of January and lead time 0):
+
+      # Select DJF (lead time 0) for our case study, excluding the 29th of February:
+      pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE]
+
+    } else {
+
+      # select data only for the startmonth and leadmonth to study:
+      cat("Extracting data. Please wait...\n")
+      chosen.month <- start.month + lead.month # find which month we want to select data from
+      if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+      # remove the 29th of February to have the same number of elements for all years
+      i=0
+      for(y in years){
+        i=i+1
+        leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+        leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+        #leadtime.max <- 61
+
+        if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1
+        int.leadtimes <- leadtime.min:leadtime.max
+        num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+        # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+
+        if(y == years[1]) {
+          pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+        } else {
+          pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+        }
+      }
+
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array ######### still to be adapted to the S4 dimensions!
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+
+    save(psl.melted,file=paste0(workdir,"/psl_melted.RData"))
+
+    cat("Preformatting data. Please wait......\n")
+    # This function needs a lot of memory!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
+    save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData"))
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+    save(my.cluster,file=paste0(workdir,"/my_cluster.RData"))
+
+
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this loop to convert a data.frame into an array needs much less memory than acast, but it is a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only one grid point in every 16, to make the PCA lighter
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA <- princomp(pslcut,cor=FALSE)
+    tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # cumulative explained variance of the first 50 PCs, used to decide how many PCs to keep
+    n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep the smallest number of PCs that together explain at least the chosen variance (e.g. 80%)
+    my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+    gc()
+
+    #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+    #if(fields.name == rean.name){
+    #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+    #}
+
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+
+#} # close for on 'p'
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if(running.cluster == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p
+      n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+
+    } else {
+      cluster.sequence <- my.cluster$cluster
+    }
+
+    if(sequences){ # it only works for reanalysis data!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    np <- n.days.in.a.period(p,2001)
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster == TRUE){
+      pslmat.new <- pslmat
+      pslmat <- pslmat.new[days.month.full,,]
+      gc()
+    }
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    # just to save the plots of the ERA-Interim monthly regime anomalies with the running cluster now, instead of loading the next script:
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+    if(psl == "g500"){
+      my.brks <- c(-300,seq(-200,200,10),300) # breaks for the mean anomaly of a WR
+      my.brks2 <- c(-300,seq(-200,200,10),300) # breaks for the contour lines of the mean anomaly of a WR
+      values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+    }
+
+    if(psl == "psl"){
+      my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # breaks for the mean anomaly of a WR
+      my.brks2 <- my.brks #c(seq(-20,20,2)) # breaks for the contour lines of the mean anomaly of a WR
+      values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+    }
+    my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+    my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+    map1 <- pslwr1mean
+    map2 <- pslwr2mean
+    map3 <- pslwr3mean
+    map4 <- pslwr4mean
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+
+
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  if(fields.name == ECMWF_S4.name){
Please wait...\n") + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array == 1, 2, sum) + wr2y <- apply(my.cluster.array == 2, 2, sum) + wr3y <- apply(my.cluster.array == 3, 2, sum) + wr4y <- apply(my.cluster.array == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
+    # in this case, we must use psl.melted instead of psleuFull; both are already in anomalies:
+    # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v39.R b/old/weather_regimes_v39.R
new file mode 100644
index 0000000000000000000000000000000000000000..93413c3c3988c6754e424d5ca294c140b9cda26e
--- /dev/null
+++ b/old/weather_regimes_v39.R
@@ -0,0 +1,1014 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken!
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- NCEP #JRA55 #ERAint #NCEP # choose one of the reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# put 'rean' to load pressure fields and var from a reanalysis, or put 'forecast' to load them from forecast systems:
+fields <- rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2016 #2016 #2013 #2015
+
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and the NAs are dealt with
+res <- 0.75 # set the resolution to which to interpolate the psl and var data when loading them (i.e: set to 0.75 to use the same res of ERA-Interim)
+            # interpolation method is BILINEAR.
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+
+running.cluster <- TRUE # add to the clustering also the daily SLP data of the two months adjacent to the target month (only for monthly analysis)
+lat.weighting <- TRUE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
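+# Minimal sketch (toy values, not run by this script) of the latitude weighting enabled above: each grid
+# point is multiplied by sqrt(cos(lat)), so high-latitude points, whose grid cells are smaller, weigh less
+# in the k-means distance:
+# lat.demo <- c(30, 50, 70)
+# w <- cos(lat.demo*pi/180)^0.5 # approx. 0.93, 0.80, 0.58
+# field.demo <- matrix(rnorm(3*4), nrow=3) # 3 lats x 4 lons
+# field.weighted <- sweep(field.demo, 1, w, "*")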
+
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for reanalysis:
+WR.period <- 1:12 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name)))
+
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
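+
+# Editor's note: hypothetical example invocations matching the conventions described below
+# (the script name and path are assumptions; adapt them to where the script actually lives):
+#   Rscript weather_regimes_v39.R        # no arguments: use the values set above
+#   Rscript weather_regimes_v39.R 7      # reanalysis, cluster July (WR.period becomes 7)
+#   Rscript weather_regimes_v39.R 1 2    # forecast: start month January, lead month 2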
+# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month for which we perform the clustering;
+# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the start month and the second one the lead month;
+# in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+   start.month <- startM
+   #WR.period <- start.month
+   if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+   fields <- rean
+   fields.name <- rean.name
+   WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+   fields <- forecast
+   fields.name <- forecast.name
+   start.month <- script.arg[1]
+   lead.month <- script.arg[2]
+   WR.period <- start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script on the SMP machine and we need to override the variables below:
+   source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar functions
+   workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+#days.period <- n.days.period <- period.length <- list()
+#for (pp in 1:17){
+#  days.period[[pp]] <- NA
+#  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+#  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+#  # number of days belonging to that period from year.start to year.end:
+#  n.days.period[[pp]] <- length(days.period[[pp]])
+#  # number of days belonging to that period in a single year:
+#  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+#}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+cat("Loading lat/lon. Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if S4 already has the same grid as the chosen one (typically ERA-Interim), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded with the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but their values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
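+# Editor's sketch (not part of the original script): how the pos.lon wrap-around works.
+# On a 0..359 longitude axis, a box from lon.min=274.5 to lon.max=45 is the union of the
+# leading slice 1:pos.lon.max and the trailing slice pos.lon.min:n.lon. Toy grid only:
+if(FALSE){ # illustration only, never executed
+   toy.lon <- seq(0, 359, 1)                       # hypothetical 1-degree grid
+   toy.max <- tail(which(45 - toy.lon >= 0), 1)    # last index <= 45E   -> 46
+   toy.min <- which(toy.lon - 274.5 >= 0)[1]       # first index >= 274.5E -> 276
+   toy.pos <- c(1:toy.max, toy.min:length(toy.lon))
+   range(toy.lon[toy.pos])                         # only 0-45E and 275-359E are kept
+}
+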
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name){ # load the daily psl data of all years in the reanalysis case:
+   psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+   #for (y in year.start:year.end){
+   #  var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+   #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+   #  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+   #  rm(var)
+   #  gc()
+   #}
+
+   psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+   # remove leap days (29th of February) so that all years have the same number of days:
+   cat("Removing leap days. Please wait...\n")
+   psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+   for(y in year.start:year.end){
+      y2 <- y - year.start + 1
+      if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+   }
+
+   rm(psleuFull366)
+   gc()
+
+   if(running.cluster == TRUE && rean.name == "ERA-Interim" && WR.period == 10) { # note: "ERA-Interim" must match ERAint.name exactly (the original compared against "ERA-interim", which never matches)
+      # in this case, the last month of the last year might not have the data for the 3rd month of the running cluster in /esnas. Load() sets these values to NA,
+      # but the clustering analysis is not able to process NAs, so we have to remove the NAs of that month and replace them with the values of the previous year:
+      psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,]
+   }
+
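+   # Editor's sketch (not part of the original script): why the 60:365 <- 61:366 shift removes the leap day.
+   # In a leap year, day-of-year 60 is the 29th of February; copying positions 61:366 into 60:365
+   # drops exactly that day while keeping January-February (positions 1:59) untouched:
+   if(FALSE){ # illustration only, never executed
+      doy365 <- c(1:59, 61:366)     # what ends up stored in the 365-day array for a leap year
+      length(doy365) == 365         # TRUE
+      60 %in% doy365                # FALSE: the 29th of February is gone
+   }
+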
+   # convert psl into daily anomalies with the LOESS filter:
+   cat("Calculating anomalies. Please wait...\n")
+   pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+   if(LOESS == TRUE){
+      pslPeriodClimLoess <- pslPeriodClim
+
+      for(i in 1:n.pos.lat){ # loop over all grid points (the original 'for(i in n.pos.lat)' only visited the last one)
+         for(j in 1:n.pos.lon){
+            my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+            my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+            pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+         }
+      }
+
+      rm(pslPeriodClim)
+      gc()
+
+      pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+   } else { # keep the raw daily climatology to compute the anomalies:
+      pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+      rm(pslPeriodClim)
+      gc()
+   }
+
+   ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+   ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+   ## s4.pred <- predict(s4.loess)
+
+   psleuFull <- psleuFull - pslPeriodClim2
+   rm(pslPeriodClim2)
+   gc()
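+
+   # Editor's sketch (not part of the original script): what the LOESS step above does.
+   # A loess fit with span=0.35 smooths the noisy 365-day mean into a slowly varying
+   # seasonal cycle, so the anomalies are not contaminated by day-to-day sampling noise:
+   if(FALSE){ # illustration only, never executed
+      day <- 1:365
+      clim.noisy <- 10 + 5*sin(2*pi*day/365) + rnorm(365)          # synthetic daily climatology
+      clim.smooth <- predict(loess(clim.noisy ~ day, span=0.35))   # same call as above
+      # plot(day, clim.noisy, type="l"); lines(day, clim.smooth, col="red", lwd=2)
+   }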
+
+   # Compute the climatology and anomalies (with the LOESS filter) for the sfcWind data, and draw the monthly anomalies, if you want to compare the mean daily wind speed anomalies
+   # reconstructed by the weather regime classification with those observed by ERA-Interim:
+   sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+   sfcWind <- sfcWind366[,,,1:365,,]
+
+   for(y in year.start:year.end){
+      y2 <- y - year.start + 1
+      if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+   }
+
+   rm(sfcWind366)
+   gc()
+
+   sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+   sfcWindClimLoess <- sfcWindClimDaily
+   for(i in 1:n.pos.lat){ # as above, loop over all grid points, not only the last one
+      for(j in 1:n.pos.lon){
+         my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+         my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+         sfcWindClimLoess[,i,j] <- predict(my.loess)
+      }
+   }
+
+   rm(sfcWindClimDaily)
+   gc()
+
+   sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+
+   sfcWindAnom <- sfcWind - sfcWindClim2
+
+   rm(sfcWindClimLoess)
+   gc()
+
+   year.test <- 2016 # select only the anomalies of a chosen month and year, and average them:
+   month.test <- 11
+
+   pos.year.start <- year.test - year.start + 1
+
+   sfcWindAnomPeriod <- sfcWindAnom[pos.year.start,pos.period(year.test,month.test),,] # note: pos.period() is called with the test year (the original passed 1, unlike the later version of this script)
+
+   sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+   EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+   my.brks.var <- seq(-3,3,0.5)
+   values.to.plot2 <- c(-3,-2.5,-2,-1.5,-1,0,0.5,1,1.5,2,2.5,3)
+
+   my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+   fileoutput <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly.png")
+
+   png(filename=fileoutput,width=1000,height=1000)
+
+   par(fig=c(0, 1, 0.07, 0.98), new=TRUE)
+   PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU], my.brks.var[1]+0.001, tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+   #my.subset2 <- match(values.to.plot2, my.brks.var)
+   #legend2.cex <- 1.8
+
+   par(fig=c(0.03, 1, 0, 0.08), new=TRUE)
+   ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8) #, subset=my.subset2)
+
+   par(fig=c(0.95, 1, 0, 0.012), new=TRUE)
+   mtext("m/s", cex=1.8)
+
+   dev.off()
+
+   fileoutput2 <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly_fig2cat.png")
+
+   system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR' ", fileoutput," ", fileoutput2))
+
+   # to format it manually from the bash shell (you must run it from your workstation until the 'identify' command of ImageMagick is available on the cluster);
+   # this line has to stay commented, since it is a shell command, not R code:
+   # sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / anomaly \nOctober / 2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_wind_anomaly.png NCEP_October_wind_anomaly_catalogue.png
+
+   # expression(~degree~C)
+
+} # close if on fields.name == rean
+
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load the data of only 1 start month at a time and all the lead times (since each month needs ~3 GB of memory)
+   if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+   fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+   fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+   if(missing.forecasts == TRUE){
+      years <- c()
+      for(y in year.start:year.end){
+         fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+         char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+         # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine:
+         #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+         num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+         num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+         num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+         num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+         #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+         if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+      }
+
+   } else {
+      years <- year.start:year.end
+   }
+
+   n.years.full <- length(years)
+
+   if(running.cluster == TRUE) {
+
+      start.month1 <- ifelse(start.month == 1, 12, start.month-1)
+      if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1}
+
+      start.month2.char <- start.month.char
+
+      start.month3 <- ifelse(start.month == 12, 1, start.month+1)
+      if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3}
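+
+      # Editor's note: a compact base-R equivalent of the zero-padding above would be
+      # sprintf("%02d", start.month1), which yields "01".."12" directly.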
+
+      # load three months of data, inserting all of them into the third dimension of the array, one month at a time:
+      psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+      gc()
+
+      psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+      gc()
+
+      psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+      gc()
+
+   } else {
+
+      psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
+
+   }
+
+   if(LOESS == TRUE){
+      # convert psl into daily anomalies with the LOESS filter:
+      cat("Calculating anomalies. Please wait...\n")
+
+      if(running.cluster == TRUE) {
+         pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+         pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+         pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+         pslPeriodClimLoess1 <- pslPeriodClim1
+         pslPeriodClimLoess2 <- pslPeriodClim2
+         pslPeriodClimLoess3 <- pslPeriodClim3
+
+         for(i in 1:n.pos.lat){ # loop over all grid points (the original 'for(i in n.pos.lat)' only visited the last one)
+            for(j in 1:n.pos.lon){
+               my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes)
+               my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35)
+               pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1)
+               rm(my.data1, my.loess1)
+               gc()
+
+               my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes)
+               my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35)
+               pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2)
+               rm(my.data2, my.loess2)
+               gc()
+
+               my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes)
+               my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35)
+               pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3)
+               rm(my.data3, my.loess3)
+               gc()
+            }
+         }
+
+         rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)
+         gc()
+
+         pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members)
+         pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members)
+         pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members)
+
+         pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+         rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3)
+         gc()
+
+         psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+         rm(psleuFull1, psleuFull2, psleuFull3)
+         gc()
+
+      } else { # in case of no running cluster:
+
+         pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+
+         pslPeriodClimLoess <- pslPeriodClim
+         for(i in 1:n.pos.lat){
+            for(j in 1:n.pos.lon){
+               my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+               my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+               pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+            }
+         }
+
+         rm(pslPeriodClim)
+         gc()
+
+         pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+      } # close if on running cluster
+
+   } else { # in case of no LOESS:
+
+      if(running.cluster == TRUE) {
+         # in this case, the climatology is computed FOR EACH MONTH INDEPENDENTLY, instead of using a seasonal value:
+         pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+         pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+         pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+         pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3))
+
+         pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+         rm(pslPeriodClim)
+         gc()
+
+         psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+         rm(psleuFull1, psleuFull2, psleuFull3)
+         gc()
+
+      } else { # in case of no running cluster:
+
+         pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) # note: this climatology was missing in the original branch, which used pslPeriodClim before defining it
+         pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+         rm(pslPeriodClim)
+         gc()
+      }
+   }
+
+   ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+   ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+   ## s4.pred <- predict(s4.loess)
+
+   psleuFull <- psleuFull - pslPeriodClimDos
+   rm(pslPeriodClimDos)
+   gc()
+
+} # close if on fields.name == forecast.name
+
+if(fields.name == ECMWF_monthly.name){
+   # Load 6-hourly psl data in the forecast case:
+   n.leadtimes <- length(leadtime)
+   sdates <- weekly.seq(forecast.year,mes,day)
+   n.sdates <- length(sdates)
+   # note: the three definitions above must precede the array allocation below, which uses them (in the original they came after it):
+   psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+   for (startdate in 1:n.sdates){
+      for(lday in leadtime){
+         var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+         mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+         rm(var)
+         gc()
+
+         psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+         rm(mean.lday)
+         gc()
+      }
+   }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pa to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+   # Select only the data of the month/season we want:
+   if(fields.name == rean.name){
+
+      #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e. winter)
+      if(running.cluster == TRUE) {
+         pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the period of 3 months centered on the target month p
+      } else {
+         pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (i.e. winter)
+      }
+
+      # weight the pressure fields by latitude (apply it to the anomalies, not to the absolute values!):
+      if(lat.weighting){
+         lat.weighted <- cos(lat*pi/180)^0.5
+         lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+         pslPeriod <- pslPeriod * lat.weighted.array
+         rm(lat.weighted.array)
+         gc()
+      }
+
+      gc()
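+
+      # Editor's sketch (not part of the original script): why cos(lat)^0.5 weighting.
+      # Grid cells shrink with latitude (their area scales with cos(lat)), so without weighting
+      # the k-means distance would over-count high-latitude points; multiplying the anomalies by
+      # sqrt(cos(lat)) makes the squared distances scale with cell area. Toy values only:
+      if(FALSE){ # illustration only, never executed
+         toy.lat <- c(30, 50, 70, 80)
+         round(cos(toy.lat*pi/180)^0.5, 2)   # 0.93 0.80 0.58 0.42: polar points count less
+      }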
+      cat("Preformatting data for clustering. Please wait...\n")
+      psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+      gc()
+
+      cat("Preformatting data for clustering. Please wait......\n")
+      # this function eats a lot of memory!!!
+      psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+      #gc()
+
+      # select the period data from psleuFull to use as input of the cluster analysis:
+      #psl.kmeans <- pslPeriod
+      #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+      #rm(pslPeriod, pslPeriod.weighted)
+   }
+
+   # in the case of S4, we don't need to select anything because we already loaded only 1 month of data!
+   if(fields.name == ECMWF_S4.name){
+      if(running.cluster == TRUE && p < 13) {
+         # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-month running period,
+         # generalizing the command below (which at present only works for the month of January and lead time 0):
+
+         # select DJF (lead time 0) for our case study, excluding the 29th of February:
+         pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE]
+
+      } else {
+
+         # select data only for the start month and lead month to study:
+         cat("Extracting data. Please wait...\n")
+         chosen.month <- start.month + lead.month # find which month we want to select the data of
+         if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+         # remove the 29th of February to have the same number of elements for all years:
+         i=0
+         for(y in years){
+            i=i+1
+            leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+            leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+            #leadtime.max <- 61
+
+            if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1
+            int.leadtimes <- leadtime.min:leadtime.max
+            num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+                                                             # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+
+            if(y == years[1]) {
+               pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+            } else {
+               pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+            }
+         }
+
+      }
+
+      # weight the pressure fields by latitude (apply it to the anomalies, not to the absolute values!):
+      if(lat.weighting){
+         lat.weighted <- cos(lat*pi/180)^0.5
+         lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+         pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!!
+         rm(lat.weighted.array)
+         gc()
+      }
+
+      # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28
+      gc()
+      cat("Preformatting data. Please wait...\n")
+      psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+      gc()
+
+      #save(psl.melted,file=paste0(workdir,"/psl_melted.RData"))
+
+      cat("Preformatting data. Please wait......\n")
+      # this function eats a lot of memory!!!
+      psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+      #gc()
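+
+      # Editor's note (sanity check, not part of the original script): after the reshape, the
+      # matrix should have one row per (member, year, day) and one column per grid point:
+      if(FALSE){ # illustration only, never executed
+         stopifnot(nrow(psl.kmeans) == n.members * n.years.full * num.leadtimes,
+                   ncol(psl.kmeans) == n.pos.lat * n.pos.lon)
+      }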
+
+      #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData"))
+      #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+      #save(my.cluster,file=paste0(workdir,"/my_cluster.RData"))
+
+      #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+      #n.rows <- nrow(psl.melted)
+      #a <- psl.melted$Member
+      #b <- psl.melted$StartYear
+      #c <- psl.melted$LeadDay
+      #d <- psl.melted$Lat
+      #e <- psl.melted$Lon
+      #f <- psl.melted$value
+
+      # this way of converting a data.frame into an array eats much less memory than acast(), but is a bit slower:
+      #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+      gc()
+      #rm(pslPeriod); gc()
+   }
+
+   if(fields.name == ECMWF_monthly.name){
+      my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+      days.period <- list() # get which days inside psleuFull belong to the chosen period
+      days.period[[p]] <- c() # initialize the element, so the concatenation below has something to append to
+      for(startdate in my.startdates.period){
+         days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+      }
+
+      # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+      # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+   }
+
+   # compute the clustering:
+   cat("Clustering data. Please wait...\n")
+   if(PCA){
+      my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 grid point out of every 16
+      pslcut <- psl.kmeans[,my.seq]
+
+      my.PCA <- princomp(pslcut,cor=FALSE)
+      tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+      n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep only the PCs that together explain at least the chosen fraction of variance (e.g. 80%)
+      my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+      rm(pslcut)
+   } else { # 4 is the number of clusters:
+
+      my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+      gc()
+
+      #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #if(fields.name == rean.name){
+      #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+      #}
+
+      if(fields.name == ECMWF_S4.name) {
+         my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+         # save the time series output of the cluster analysis:
+         #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+         #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+      }
+   }
+
+   rm(psl.kmeans)
+   gc()
+
+#} # close for on 'p'
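+
+# Editor's note (not part of the original script): the pieces of the kmeans() output used below.
+# my.cluster$cluster is an integer vector with one regime label in 1:4 per row of psl.kmeans
+# (i.e. per day, or per member/year/day for S4); my.cluster$centers holds the 4 regime centroids:
+if(FALSE){ # illustration only, never executed
+   str(my.cluster$cluster)            # int [1:nrow(psl.kmeans)]
+   dim(my.cluster$centers)            # 4 x (n.pos.lat * n.pos.lon)
+   table(my.cluster$cluster)          # number of days assigned to each of the 4 regimes
+}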
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure the regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+   if(fields.name == rean.name){
+
+      if(running.cluster == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+         n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+         days.month <- days.month.new <- days.month.full <- c()
+         days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+         for(y in 1:n.years){
+            days.month.new <- days.month + n.days.period*(y-1)
+            days.month.full <- c(days.month.full, days.month.new)
+            #print(days.month.new)
+            #print(days.month)
+         }
+
+         cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+
+      } else {
+         cluster.sequence <- my.cluster$cluster
+      }
+
+      if(sequences){ # it works only for rean data!!!
+         # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+         # warning: the first 4 and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+         wrdiff <- diff(my.cluster$cluster)
+
+         sequ <- rep(FALSE, n.days.period[[p]])
+         for(i in 5:(n.days.period[[p]]-5)){
+            sequ[i] <- TRUE
+            if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+            if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+            if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+            if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+            if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+            if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+            if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+            if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+            if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+            if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+            if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+            if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+            if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+            if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+         } # close for loop on i
+
+         # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+         cluster.sequence <- my.cluster$cluster * sequ
+         rm(wrdiff, sequ)
+      }
+
+      # replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+      wr1 <- which(cluster.sequence == 1)
+      wr2 <- which(cluster.sequence == 2)
+      wr3 <- which(cluster.sequence == 3)
+      wr4 <- which(cluster.sequence == 4)
+
+      # yearly frequency of each regime:
+      wr1y <- wr2y <- wr3y <- wr4y <- c()
+      np <- n.days.in.a.period(p,2001)
+
+      for(y in year.start:year.end){
+         # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+         wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+         wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+         wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+         wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+      }
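+
+      # Editor's note (sanity check, not part of the original script): with no sequence filtering,
+      # the four regime counts of each year must add up to the number of days np in the period:
+      if(FALSE){ # illustration only, never executed
+         stopifnot(all(wr1y + wr2y + wr3y + wr4y == np))  # holds when sequences == FALSE
+      }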
+
+      # convert to frequencies in %:
+      wr1y <- wr1y/np
+      wr2y <- wr2y/np
+      wr3y <- wr3y/np
+      wr4y <- wr4y/np
+
+      gc()
+
+      pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+      if(running.cluster == TRUE){
+         pslmat.new <- pslmat
+         pslmat <- pslmat.new[days.month.full,,]
+         gc()
+      }
+
+      pslwr1 <- pslmat[wr1,,, drop=F]
+      pslwr2 <- pslmat[wr2,,, drop=F]
+      pslwr3 <- pslmat[wr3,,, drop=F]
+      pslwr4 <- pslmat[wr4,,, drop=F]
+
+      pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+      pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+      pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+      pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+      # spatial mean for each day, saved for the measure of the FairRPSS:
+      pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+      pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+      pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+      pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+      # save all the data necessary to redraw the graphs once we know the right regime:
+      save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+      # immediately save the plots of the ERA-Interim monthly regime anomalies with the running cluster, instead of loading them in the next script:
+      EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+      if(psl == "g500"){
+         my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+         my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+         values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+      }
+
+      if(psl == "psl"){
+         my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+         my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+         values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+      }
+      my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+      my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+      map1 <- pslwr1mean
+      map2 <- pslwr2mean
+      map3 <- pslwr3mean
+      map4 <- pslwr4mean
+      png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+      PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+      dev.off()
+      png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+      PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+      dev.off()
+      png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+      PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+      dev.off()
+      png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+      PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+      dev.off()
+
+      rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+      rm(pslwr1,pslwr2,pslwr3,pslwr4)
+      gc()
+
+   }
+
+   if(fields.name == ECMWF_S4.name){
+      cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+      #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+      cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+      wr1 <- which(cluster.sequence == 1)
+      wr2 <- which(cluster.sequence == 2)
+      wr3 <- which(cluster.sequence == 3)
+      wr4 <- which(cluster.sequence == 4)
+
+      wr1y <- apply(my.cluster.array == 1, 2, sum)
+      wr2y <- apply(my.cluster.array == 2, 2, sum)
+      wr3y <- apply(my.cluster.array == 3, 2, sum)
+      wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+      # convert to frequencies in %:
+      wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days of one month of the cluster time series
+      wr2y <- wr2y/(num.leadtimes*n.members)
+      wr3y <- wr3y/(num.leadtimes*n.members)
+      wr4y <- wr4y/(num.leadtimes*n.members)
+
+      # same as above, but to measure the maximum frequency across all the members:
+      wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+      wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+      wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+      wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+      wr1yMax <- wr1yMax/num.leadtimes
+      wr2yMax <- wr2yMax/num.leadtimes
+      wr3yMax <- wr3yMax/num.leadtimes
+      wr4yMax <- wr4yMax/num.leadtimes
+
+      wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+      wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+      wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+      wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+      wr1yMin <- wr1yMin/num.leadtimes
+      wr2yMin <- wr2yMin/num.leadtimes
+      wr3yMin <- wr3yMin/num.leadtimes
+      wr4yMin <- wr4yMin/num.leadtimes
+
+      cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+      # in this case, we must use psl.melted instead of psleuFull; both are already anomalies:
+      # (psl.melted depends on the startdate and leadtime, psleuFull doesn't)
+      gc()
+      pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+      #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+      gc()
+
+      #for(a in 1:2){
+      #  for(b in 1:3){
+      #    for(c in 1:4){
+      #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+      #    }
+      #  }
+      #}
+
+      pslwr1 <- pslmat[wr1,,, drop=F]
+      pslwr2 <- pslmat[wr2,,, drop=F]
+      pslwr3 <- pslmat[wr3,,, drop=F]
+      pslwr4 <- pslmat[wr4,,, drop=F]
+
+      pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+      pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+      pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+      pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+      # spatial mean for each day, saved for the measure of the FairRPSS:
+      pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+      pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+      pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+      pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+      rm(pslmat)
+      gc()
+
+      # save all the data necessary to draw the graphs (but not the impact maps):
+      save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+      rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+      rm(pslwr1,pslwr2,pslwr3,pslwr4)
+      gc()
+
+   }
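+
+   # Editor's note (not part of the original script): the monthly system interleaves the years
+   # (year-major within each startdate), so picking one year's days means striding by n.years:
+   if(FALSE){ # illustration only, never executed
+      # with 3 years and 2 startdates the order is y1s1, y2s1, y3s1, y1s2, y2s2, y3s2;
+      # the days of year 2 are then found at positions:
+      seq(2, 6, by=3)   # 2 5
+   }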
+   # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+   if(fields.name == ECMWF_monthly.name){
+      my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+      #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+      days.period <- period.length <- list()
+      days.period[[p]] <- c() # initialize the element, so the concatenation below has something to append to
+      for(startdate in my.startdates.period){
+         days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+      }
+      period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+      # yearly frequency of each regime; the indexing below assumes a loop over the years y:
+      wr1y <- wr2y <- wr3y <- wr4y <- c()
+      for(y in year.start:year.end){
+         wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+         wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+         wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+         wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+      }
+   }
+
+   cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v39.R~ b/old/weather_regimes_v39.R~
new file mode 100644
index 0000000000000000000000000000000000000000..99b8b8c7a9777b076aae62ebe4994bca56036e1b
--- /dev/null
+++ b/old/weather_regimes_v39.R~
@@ -0,0 +1,1080 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(RColorBrewer) # for brewer.pal(), used by the plots below (assumed available; it may also be loaded by Rfunctions.R)
+library(reshape2) # after each update of s2dverification, it is better to remove and reinstall this package, because sometimes it gets broken!
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine
+
+if(SMP){
+   source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+   workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+   source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+   workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # : load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!!!!!
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- ERAint #JRA55 #ERAint #NCEP # choose one of the reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# set 'rean' to load pressure fields and var from a reanalysis, or 'forecast' to load them from a forecast system:
+fields <- rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2016 #2016 #2013 #2015
+
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and NAs are dealt with
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+running.cluster <- TRUE # also add to the clustering the daily SLP data of the two months closest to the target month (only for monthly analysis)
+lat.weighting <- TRUE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+subdaily <- TRUE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available)
+
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+ +# Only for reanalysis: +WR.period <- 1:12 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +res <- 0.75 # set the resolution you want to interpolate the seasonal psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name))) + +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+   start.month <- startM
+   #WR.period <- start.month
+   if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+   fields <- rean
+   fields.name <- rean.name
+   WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+   fields <- forecast
+   fields.name <- forecast.name
+   start.month <- script.arg[1]
+   lead.month <- script.arg[2]
+   WR.period <- start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script on the SMP machine and we need to override the variables below:
+   source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar functions
+   workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+#days.period <- n.days.period <- period.length <- list()
+#for (pp in 1:17){
+#  days.period[[pp]] <- NA
+#  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+#  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+#  # number of days belonging to that period from year.start to year.end:
+#  n.days.period[[pp]] <- length(days.period[[pp]])
+#  # number of days belonging to that period in a single year:
+#  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+#}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+cat("Loading lat/lon. Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if S4 already has the same grid as the chosen one (typically ERA-Interim), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded with the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but their values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name && subdaily == FALSE){ # load the daily psl data of all years in the reanalysis case:
+   psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+   psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+   # remove leap days (29th of February) so that all years have the same number of days:
+   cat("Removing leap days. Please wait...\n")
+   psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+   for(y in year.start:year.end){
+      y2 <- y - year.start + 1
+      if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+   }
+
+   rm(psleuFull366)
+   gc()
+
+   if(running.cluster == TRUE && rean.name == "ERA-Interim" && WR.period == 10) { # note: "ERA-Interim" must match ERAint.name exactly (the original compared against "ERA-interim", which never matches)
+      # in this case, the last month of the last year might not have the data for the 3rd month of the running cluster in /esnas. Load() sets these values to NA,
+      # but the clustering analysis is not able to process NAs, so we have to remove the NAs of that month and replace them with the values of the previous year:
+      psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,]
+   }
+
+   # convert psl into daily anomalies with the LOESS filter:
+   cat("Calculating anomalies. Please wait...\n")
+   pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+   if(LOESS == TRUE){
+      pslPeriodClimLoess <- pslPeriodClim
+
+      for(i in 1:n.pos.lat){ # loop over all grid points (the original 'for(i in n.pos.lat)' only visited the last one)
+         for(j in 1:n.pos.lon){
+            my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+            my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+            pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+         }
+      }
+
+      rm(pslPeriodClim)
+      gc()
+
+      pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+   } else { # keep the raw daily climatology to compute the anomalies:
+      pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+      rm(pslPeriodClim)
+      gc()
+   }
+
+   ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+   ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+   ## s4.pred <- predict(s4.loess)
+
+   psleuFull <- psleuFull - pslPeriodClim2
+   rm(pslPeriodClim2)
+   gc()
+
+} # close if on fields.name == rean and subdaily == FALSE
+
+if(fields.name == rean.name) {
+   # Compute the climatology and anomalies (with the LOESS filter) for the sfcWind data, and draw the monthly anomalies, if you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis:
+
+   sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+   sfcWind <- sfcWind366[,,,1:365,,]
+
+   for(y in year.start:year.end){
+      y2 <- y - year.start + 1
+      if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-to-December period, dropping the 29th of February
+   }
+
+   rm(sfcWind366)
+   gc()
+
+   sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+   sfcWindClimLoess <- sfcWindClimDaily
+   for(i in 1:n.pos.lat){ # as above, loop over all grid points, not only the last one
+      for(j in 1:n.pos.lon){
+         my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+         my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+         sfcWindClimLoess[,i,j] <- predict(my.loess)
+      }
+   }
+
+   rm(sfcWindClimDaily)
+   gc()
+
+   sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+
+  sfcWindAnom <- sfcWind - sfcWindClim2
+
+  rm(sfcWindClimLoess)
+  gc()
+
+  year.test <- 2016 # select only anomalies of a chosen month and year, and average them:
+  month.test <- 10
+  pos.year.start <- year.test - year.start + 1
+
+  sfcWindAnomPeriod <- sfcWindAnom[pos.year.start,pos.period(year.test,month.test),,]
+
+  sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+  EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+
+  my.brks.var <- seq(-3,3,0.5)
+  values.to.plot2 <- c(-3,-2.5,-2,-1.5,-1,0,0.5,1,1.5,2,2.5,3)
+
+  my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+  fileoutput <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly.png")
+
+  png(filename=fileoutput,width=1000,height=1000)
+
+  par(fig=c(0, 1, 0.07, 0.98), new=TRUE)
+  PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU], my.brks.var[1]+0.001, tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+  #my.subset2 <- match(values.to.plot2, my.brks.var)
+  #legend2.cex <- 1.8
+
+  par(fig=c(0.03, 1, 0, 0.08), new=TRUE)
+  ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8) #, subset=my.subset2)
+
+  par(fig=c(0.95, 1, 0, 0.012), new=TRUE)
+  mtext("m/s", cex=1.8)
+
+  dev.off()
+
+  fileoutput2 <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly_fig2cat.png")
+
+  system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR' ", fileoutput," ", fileoutput2))
+
+  # to format it manually from the bash shell (you must run it from your workstation until the 'identity' command of ImageMagick is available on the cluster):
+  # sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / anomaly \nOctober / 2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_wind_anomaly.png NCEP_October_wind_anomaly_catalogue.png
+
+  # expression(~degree~C)
+
+}
+
+
+# Load 6-hourly psl data of all years in the reanalysis case:
+if(fields.name == rean.name && subdaily == TRUE){
+  psleuFull <- array(NA,c(1,1,n.years,365*4, n.pos.lat, n.pos.lon))
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  # remove leap days to have all arrays with the same dimensions
+  # (6-hourly data: 4 time steps per day, so all time indices are scaled by 4):
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:(365*4),,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) psleuFull[,,y2,(59*4+1):(365*4),,] <- psleuFull366[,,y2,(60*4+1):(366*4),,] # take the March to December period, removing the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  if(running.cluster == TRUE && rean.name == "ERA-Interim" && WR.period == 10) {
+    # in this case, the last month of the last year might not have the data in /esnas for the 3rd month of the running cluster. Load() sets these values to NA,
+    # but the clustering analysis is not able to process NAs, so we have to replace the NAs of that month with the values of the previous year:
+
+    psleuFull[,,36,(304*4+1):(334*4),,] <- psleuFull[,,35,(304*4+1):(334*4),,] # days 305-334 (November) expressed in 6-hourly steps
+
+  }
+
+  # convert psl into anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- pslPeriodClim
+
+    for(i in 1:n.pos.lat){ # loop over all grid points
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:(365*4)) # 6-hourly time steps
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else { # keep the raw daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean & subdaily == TRUE
+
+
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for one start month at a time and all the lead times (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware, in this way it can only take into account lead times from 100 to 999, but it is the only way on the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+      if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+    }
+
+  } else {
+    years <- year.start:year.end
+  }
+
+  n.years.full <- length(years)
+
+  if(running.cluster == TRUE) {
+
+    start.month1 <- ifelse(start.month == 1, 12, start.month-1)
+    if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1}
+
+    start.month2.char <- start.month.char
+
+    start.month3 <- ifelse(start.month == 12, 1, start.month+1)
+    if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3}
+
+    # load three months of data, inserting them all in the third dimension of
the array, one month at time: + psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + + if(running.cluster == TRUE) { + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess1 <- pslPeriodClim1 + pslPeriodClimLoess2 <- pslPeriodClim2 + pslPeriodClimLoess3 <- pslPeriodClim3 + + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes) + my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35) + pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1) + rm(my.data1, my.loess1) + gc() + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes) + my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35) + pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2) + rm(my.data2, my.loess2) + gc() + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes) + my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35) + pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3) + rm(my.data3, my.loess3) + gc() + } + } + + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3) + gc() + + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- 
data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+          my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+          pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+        }
+      }
+
+      rm(pslPeriodClim)
+      gc()
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    } # close if on running cluster
+
+  } else { # in case of no LOESS:
+
+    if(running.cluster == TRUE) {
+      # in this case, the climatology is measured FOR EACH MONTH INDEPENDENTLY, instead of using a seasonal value:
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      # insert the member and year dimensions, then stack the three months along the same dimension used for psleuFull below:
+      pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClim1, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClim2, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClim3, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+      rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) # seasonal climatology (needed here because the LOESS branch above was skipped)
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+    }
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClimDos
+  rm(pslPeriodClimDos)
+  gc()
+
+} # close if on fields.name == ECMWF_S4.name
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e. winter)
+    if(running.cluster == TRUE) {
+      pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the period of 3 months centered on the target month p
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period
(i.e: winter) + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Preformatting data for clustering. Please wait......\n") + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + if(running.cluster == TRUE && p < 13) { + # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-months running period + # generalizing the command below (which at present only work for the month of January an lead time 0): + + # Select DJF (lead time 0) for our case study, excluding 29 of february): + pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE] + + } else { + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # remove the 29th of February to have the same n. of elements for all years + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!! + rm(lat.weighted.array) + gc() + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + + #save(psl.melted,file=paste0(workdir,"/psl_melted.RData")) + + cat("Preformatting data. Please wait......\n") + # This function eats much memory!!! 
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData")) + #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + #save(my.cluster,file=paste0(workdir,"/my_cluster.RData")) + + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no bisestile day left!) + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA <- princomp(pslcut,cor=FALSE) + tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + gc() + + #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +#} # close for on 'p' + 
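+## A minimal, self-contained sketch of the clustering step above, on toy data
+## (the sizes, the 0.8 variance threshold and the set.seed() call are illustrative
+## assumptions, not values taken from this script):
+## set.seed(1)
+## toy.psl <- matrix(rnorm(200*50), nrow=200, ncol=50)          # 200 'days' x 50 'grid points'
+## toy.PCA <- princomp(toy.psl, cor=FALSE)                      # EOF/PCA decomposition
+## toy.expvar <- cumsum(toy.PCA$sdev^2/sum(toy.PCA$sdev^2))     # cumulative explained variance
+## toy.npcs <- which(toy.expvar > 0.8)[1]                       # keep the first PCs explaining >80% of variance
+## toy.cluster <- kmeans(toy.PCA$scores[,1:toy.npcs], centers=4, iter.max=100, nstart=30)
+## table(toy.cluster$cluster)                                   # n. of days assigned to each of the 4 regimes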
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if(running.cluster == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+      n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p
+
+    } else {
+      cluster.sequence <- my.cluster$cluster
+    }
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    np <- n.days.in.a.period(p,2001)
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.:
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster == TRUE){
+      pslmat.new <- pslmat
+      pslmat <- pslmat.new[days.month.full,,]
+      gc()
+    }
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    # immediately save the plots of the ERA-Interim monthly regime anomalies with the running cluster, instead of loading them in the next script:
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental Europe only
+    if(psl == "g500"){
+      my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR
+      my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines
+      values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+    }
+
+    if(psl == "psl"){
+      my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR
+      my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines
+      values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+    }
+    my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+    my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+    map1 <- pslwr1mean
+    map2 <- pslwr2mean
+    map3 <- pslwr3mean
+    map4 <- pslwr4mean
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat,
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. Please wait...\n") + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array == 1, 2, sum) + wr2y <- apply(my.cluster.array == 2, 2, sum) + wr3y <- apply(my.cluster.array == 3, 2, sum) + wr4y <- apply(my.cluster.array == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + gc() + + #for(a in 1:2){ + # for(b in 1:3){ + # for(c in 1:4){ + # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,] + # } + # } + #} + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day to save for the meaure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v4.R b/old/weather_regimes_v4.R
new file mode 100644
index 0000000000000000000000000000000000000000..9e46fc1deed95df94ce126e9c3c49c79e5e9d71d
--- /dev/null
+++ b/old/weather_regimes_v4.R
@@ -0,0 +1,263 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with the seasonal and yearly maps
+
+rean <- ERAint # choose one of the two reanalyses above
+rean.name <- ERAint.name
+
+psl <- "g500" # pressure variable to use from the chosen reanalysis
+var.name <- "tas" #"sfcWind" # name of the 'predictand' variable of the chosen reanalysis
+
+year.start <- 1979
+year.end <- 2013
+
+period = 13 # (winter) # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitude!
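+## Longitudes here follow the 0-360 convention. A hedged one-liner to convert
+## coordinates given in the -180..180 convention (toy values, not from this script):
+## lon.toy <- c(-80, -10, 40)
+## ifelse(lon.toy < 0, lon.toy + 360, lon.toy)   # returns 280 350 40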
+
+#############################################################################################################################
+
+# load only 1 day of geopotential to detect the minimum and maximum lat and lon values to identify only the North Atlantic
+domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+lat <- domain$lat[pos.lat] # lat of the chosen area only (it is smaller than the whole spatial domain)
+lon <- domain$lon[pos.lon] # lon of the chosen area only
+
+z500euFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+## for (y in year.start:year.end){
+##   var <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat')
+##   z500eu <- var$obs[,,,,pos.lat,pos.lon]
+##   z500euFull[seq.days.in.a.future.year(year.start, y),,] <- z500eu
+##   rm(z500eu)
+##   gc()
+## }
+
+if(domain$lat[pos.lat.min] >= domain$lat[pos.lat.max]) stop("lat.min cannot be higher than lat.max")
+
+for (y in year.start:year.end){
+  var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+  z500euFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+# each time you want to change variable 'period' and/or 'var', set period to 13 and load the winter dataset where all input data is stored:
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData"))
+
+days.period <- NA
+for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+days.period <- days.period[-1] # remove the NA at the beginning, introduced only to be able to execute the above command
+n.days.period <- length(days.period)
+
+z500 <- z500euFull[days.period,,] # select only days in the chosen period (i.e. winter)
+
+z500mat <- z500
+
+dim(z500mat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
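+## R arrays are stored column-major, so resetting dim() keeps the first dimension (the days)
+## as rows and flattens lat/lon into columns. A tiny toy illustration (values are assumptions):
+## a <- array(1:12, c(2,3,2))   # 2 days x 3 lat x 2 lon
+## dim(a) <- c(2, 6)            # same data, now a 2 x 6 matrix of days x grid points
+## a[1,]                        # day-1 values: 1 3 5 7 9 11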
+ +my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + +z500cut <- z500mat[,my.seq] + +my.PCA <- princomp(z500cut,cor=FALSE) + +head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),20) # check how many PCAs to keep basing on the sum of their explained variance + +my.cluster <- kmeans(my.PCA$scores[,1:8], 4) # 4 is the number of clusters, 7 the number of EOFs which explains ~80% of variance + +rm(z500mat) +gc() + +# Load wind data: + +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +## for (y in year.start:year.end){ +## var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat') +## vareu <- var$obs[,,,,pos.lat,pos.lon] +## vareuFull[seq.days.in.a.future.year(year.start, y),,] <- vareu +## } + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData")) + +varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + +varPeriodClim <- apply(varPeriod,c(2,3),mean,na.rm=T) +varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + +varPeriodAnom <- varPeriod - varPeriodClim2 +rm(varPeriod, varPeriodClim2) +gc() + +wr1 <- which(my.cluster$cluster==1) +wr2 <- which(my.cluster$cluster==2) +wr3 <- which(my.cluster$cluster==3) +wr4 <- which(my.cluster$cluster==4) + +varPeriodAnom1 <- varPeriodAnom[wr1,,] +varPeriodAnom2 <- varPeriodAnom[wr2,,] +varPeriodAnom3 <- varPeriodAnom[wr3,,] +varPeriodAnom4 <- varPeriodAnom[wr4,,] + +varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) +varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) +varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) +varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + +varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) +varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) +varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) +varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + +pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) +gc() + +z500wr1 <- z500[wr1,,] +z500wr2 <- z500[wr2,,] +z500wr3 <- z500[wr3,,] +z500wr4 <- z500[wr4,,] + +z500wr1mean <- apply(z500wr1,c(2,3),mean,na.rm=T) +z500wr2mean <- apply(z500wr2,c(2,3),mean,na.rm=T) +z500wr3mean <- apply(z500wr3,c(2,3),mean,na.rm=T) +z500wr4mean <- apply(z500wr4,c(2,3),mean,na.rm=T) + +# Mean z500 maps: + +my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of 
a WR +my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR +my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) # blue--white--red colors + +# First check each cluster to find to which regime it corrsponds: +PlotEquiMap(z500wr1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(z500wr1mean), brks2=my.brks2) +PlotEquiMap(z500wr2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(z500wr2mean), brks2=my.brks2) +PlotEquiMap(z500wr3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(z500wr3mean), brks2=my.brks2) +PlotEquiMap(z500wr4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(z500wr4mean), brks2=my.brks2) + +# Assign to each cluster the name of its regime: +regime1.name <- "Cluster #1" +regime2.name <- "Cluster #2" +regime3.name <- "Cluster #3" +regime4.name <- "Cluster #4" + +# Visualize and save average geopotential associated to each regime: +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,".png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(z500wr1mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime1.name, contours=t(z500wr1mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime2.name,".png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(z500wr2mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime2.name, contours=t(z500wr2mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime3.name,".png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(z500wr3mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime3.name, contours=t(z500wr3mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime4.name,".png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(z500wr4mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime4.name, contours=t(z500wr4mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +dev.off() + + +# Visualize and save average wind anomalies associated to each regime: +my.brks.var <- c(seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1)) # % Mean anomaly of a WR +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_anomalies.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(varPeriodAnom1mean, lon, lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=paste(regime1.name, "Anomalies"), drawleg=F, dots=t(pvalue1 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, 
subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime2.name,"_anomalies.png"), width=1000, height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=paste(regime2.name, "Anomalies"), drawleg=F, dots=t(pvalue2 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime3.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=paste(regime3.name, "Anomalies"), drawleg=F, dots=t(pvalue3 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime4.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=paste(regime4.name, "Anomalies"), drawleg=F, dots=t(pvalue4 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+
+
+
+
+# save as .pdf instead of .png:
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO-.pdf"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Blocking.pdf"), width=1000, height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Atlantic_Ridge.pdf"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO+.pdf"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+dev.off()
+
+rm(varPeriodeuFull,varPeriodeu,varPeriod,var)
diff --git a/old/weather_regimes_v40.R b/old/weather_regimes_v40.R
new file mode 100644
index 0000000000000000000000000000000000000000..d5226cecfa1096e61107f985be747a3732e10737
--- /dev/null
+++ b/old/weather_regimes_v40.R
@@ -0,0 +1,1090 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP machine
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # : load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- ERAint #JRA55 #NCEP # choose one of the three reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# set to 'rean' to load pressure fields and var from a reanalysis, or to 'forecast' to load them from a forecast system:
+fields <- rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1982 #1994
+year.end <- 2016 #2013 #2015
+
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and the NAs are dealt with
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+running.cluster <- TRUE # add to the clustering also the daily SLP data of the two months adjacent to the target month (only for monthly analysis)
+lat.weighting <- TRUE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+subdaily <- TRUE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available)
+wind_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed anomalies for a chosen year over Europe
+
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # minimum fraction of variance that the retained PCs must explain (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #80 #70
+lat.min <- 27 #30 #20
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #270 #280 # put a positive number here, because the geopotential has only positive values of longitude!
+
+# Only for reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for seasonal forecasts:
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+res <- 0.75 # resolution to which the seasonal psl and var data are interpolated when loading them (e.g. 0.75 to use the same res as ERA-Interim);
+            # the interpolation method is bilinear.
+
+# Only for monthly forecasts:
+#leadtime <- 1:7 #5:11 # if fields=forecast, set the forecast times (in days) for which to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of the obs. data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name)))
+
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+# if the script is run with one argument, a reanalysis is assumed and the argument is the month for which to perform the clustering;
+# if the script is run with two arguments, a forecast is assumed: the first argument is the start month and the second one the lead month.
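+#
+# For example (a usage sketch, assuming this file is saved as ~/scripts/weather_regimes_v40.R):
+#   Rscript ~/scripts/weather_regimes_v40.R 2     # reanalysis mode: cluster the month of February
+#   Rscript ~/scripts/weather_regimes_v40.R 1 0   # forecast mode: start month January, lead month 0
+#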
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n")
+# load only 1 day of pressure data, to detect the minimum and maximum lat and lon values which identify the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis
+# (notice that if the S4 grid is already the same as the chosen grid (typically ERA-Interim's), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that this only covers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (smaller than the whole spatial domain loaded with the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
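+
+# Toy illustration (hypothetical 1-degree grid, not the grid loaded above) of how 'pos.lon'
+# joins the band east of Greenwich with the band west of it, so the domain crosses lon=0 without a gap:
+## lon.toy <- 0:359                # hypothetical longitude vector of a 1-degree global grid
+## pos.toy <- c(1:46, 276:360)     # ~0E..45E plus ~275E..359E (i.e. ~85W..1W)
+## lon.toy[pos.toy]                # the two longitude bands, wrapped around the Greenwich meridian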
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name && subdaily == FALSE){ # load daily psl data of all years, in the reanalysis case:
+  psleuFull <-array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  # remove leap days (29 February), so that all years have the same number of days:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March-December period, skipping the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  if(running.cluster == TRUE && rean.name == "ERA-Interim" && WR.period == 10) {
+    # in this case, the last month of the last year might not have in /esnas the data for the 3rd month of the running cluster. Load() sets these values to NA,
+    # but the cluster analysis cannot process NAs, so we replace the NAs of that month with the values of the previous year:
+    psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,]
+  }
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- pslPeriodClim
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else { # keep the raw daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean.name && subdaily == FALSE
+
+# Load 6-hourly psl data of all years, in the reanalysis case:
+if(fields.name == rean.name && subdaily == TRUE){
+
+  #sdates <- as.vector(sapply(year.start:year.end, function(x) paste0(x, sprintf("%02d", 1:12), '01')))
+  #my.exp <- list(path=fields)
+  #psleuFull366 <- Load(var = psl, exp = list(my.exp), NULL, sdates, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+  #dim(data$obs) <- c(dim(data$obs)[1:2], 1, dim(data$obs)[3]*dim(data$obs)[4], dim(data$obs)[5:6])
+
+  my.exp <- list(path=fields)
+
+  # Load the data one month at a time (Load() automatically discards the 29th of February):
+  psleu <- list()
+  for(m in 1:12){
+    psleu[[m]] <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end, sprintf("%02d", m), '01'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  }
+
+  psleuFull <- do.call(abind, c(psleu, list(along=4)))
+  rm(psleu)
+  gc()
+
+  if(running.cluster == TRUE && rean.name == "ERA-Interim" && WR.period == 10) {
+    # in this case, the last month of the last year might not have in /esnas the data for the 3rd month of the running cluster. Load() sets these values to NA,
+    # but the cluster analysis cannot process NAs, so we replace the NAs of that month with the values of the previous year:
+    psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,]
+  }
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
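+  # A minimal self-contained sketch (toy data, not the arrays used below) of what the LOESS
+  # smoothing with span=0.35 does to a noisy 365-day climatology:
+  ## toy <- data.frame(day=1:365, clim=sin(2*pi*(1:365)/365) + rnorm(365, sd=0.3))
+  ## toy.fit <- predict(loess(clim ~ day, toy, span=0.35))  # smoothed annual cycle
+  ## plot(toy$day, toy$clim, type="l"); lines(toy$day, toy.fit, col="red")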
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- pslPeriodClim
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else { # keep the raw daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean.name && subdaily == TRUE
+
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load the data of only 1 start month at a time, with all its lead times (each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware: this way it can only handle leadtimes from 100 to 999, but it is the only way on the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP machine
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+      if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+    }
+
+  } else {
+    years <- year.start:year.end
+  }
+
+  n.years.full <- length(years)
+
+  if(running.cluster == TRUE) {
+
+    start.month1 <- ifelse(start.month == 1, 12, start.month-1)
+    if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1}
+
+    start.month2.char <- start.month.char
+
+    start.month3 <- ifelse(start.month == 12, 1, start.month+1)
+    if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3}
+
+    # load three months of data, inserting all of them in the third dimension of the array, one month at a time:
+    psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max,
grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + + if(running.cluster == TRUE) { + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess1 <- pslPeriodClim1 + pslPeriodClimLoess2 <- pslPeriodClim2 + pslPeriodClimLoess3 <- pslPeriodClim3 + + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes) + my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35) + pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1) + rm(my.data1, my.loess1) + gc() + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes) + my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35) + pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2) + rm(my.data2, my.loess2) + gc() + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes) + my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35) + pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3) + rm(my.data3, my.loess3) + gc() + } + } + + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3) + gc() + + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } # close if on running cluster + + } else { #in case of 
no LOESS:
+
+    if(running.cluster == TRUE) {
+      # in this case, the climatology is computed FOR EACH MONTH INDEPENDENTLY, instead of using one seasonal value:
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3))
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) # climatology of the loaded start month
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+    }
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClimDos
+  rm(pslPeriodClimDos)
+  gc()
+
+} # close if on fields.name == forecast.name
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case (the startdates and leadtimes must be defined before the array that uses them):
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+  # daily order inside psleuFull: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+  psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g. winter)
+    if(running.cluster == TRUE) {
+      pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the 3-month period centered on the target month p
+    } else {
+      pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period (e.g. winter)
+    }
+
+    # weight the pressure fields by latitude (apply it to the anomalies, not to the absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array
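+      # Why sqrt(cos(lat)): kmeans() minimizes squared Euclidean distances, so scaling each grid
+      # point by cos(lat)^0.5 makes its contribution to the distance proportional to cos(lat),
+      # i.e. to the actual grid-cell area. Illustrative values only:
+      ## round(cos(c(30,60,80)*pi/180)^0.5, 2)   # 0.93, 0.71, 0.42: high-latitude points are damped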
rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Preformatting data for clustering. Please wait......\n") + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + if(running.cluster == TRUE && p < 13) { + # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-months running period + # generalizing the command below (which at present only work for the month of January an lead time 0): + + # Select DJF (lead time 0) for our case study, excluding 29 of february): + pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE] + + } else { + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # remove the 29th of February to have the same n. of elements for all years + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!! + rm(lat.weighted.array) + gc() + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + + #save(psl.melted,file=paste0(workdir,"/psl_melted.RData")) + + cat("Preformatting data. Please wait......\n") + # This function eats much memory!!! 
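+    # What the acast() call below does, on a toy example: it spreads the Lat/Lon dimensions into
+    # columns and keeps one row per (member, year, day), i.e. the days-by-gridpoints matrix that kmeans() expects:
+    ## library(reshape2)
+    ## toy <- melt(array(1:8, c(2,2,2)), varnames=c("Day","Lat","Lon"))
+    ## acast(toy, Day ~ Lat + Lon)   # 2 rows (days) x 4 columns (grid points)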
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData")) + #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + #save(my.cluster,file=paste0(workdir,"/my_cluster.RData")) + + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no bisestile day left!) + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA <- princomp(pslcut,cor=FALSE) + tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + gc() + + #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +#} # close for on 'p' + 
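+# Optional quick checks on the clustering computed above (commented out; 'my.cluster' is the
+# kmeans object created in the block above):
+## table(my.cluster$cluster)                             # number of days assigned to each cluster
+## round(100*my.cluster$betweenss/my.cluster$totss, 1)   # % of variance separated by the 4 centroids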
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +# measure regime anomalies: + +#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + + if(running.cluster == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p + n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months + + days.month <- days.month.new <- days.month.full <- c() + days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + n.days.period*(y-1) + days.month.full <- c(days.month.full, days.month.new) + #print(days.month.new) + #print(days.month) + } + + cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p + + } else { + cluster.sequence <- my.cluster$cluster + } + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + np <- n.days.in.a.period(p,2001) + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2)) + wr3y[y-year.start+1] <- 
length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster == TRUE){
+      pslmat.new <- pslmat
+      pslmat <- pslmat.new[days.month.full,,]
+      gc()
+    }
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    # save all the data necessary to redraw the graphs once we know the right regime:
+    save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    # immediately save the plots of the ERA-Interim monthly regime anomalies computed with the running cluster, instead of loading them in the next script:
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+    if(psl == "g500"){
+      my.brks <- c(-300,seq(-200,200,10),300) # breaks for the mean anomaly of a WR
+      my.brks2 <- c(-300,seq(-200,200,10),300) # breaks for the contour lines
+      values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+    }
+
+    if(psl == "psl"){
+      my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # breaks for the mean anomaly of a WR
+      my.brks2 <- my.brks #c(seq(-20,20,2)) # breaks for the contour lines
+      values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+    }
+    my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+    my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+    map1 <- pslwr1mean
+    map2 <- pslwr2mean
+    map3 <- pslwr3mean
+    map4 <- pslwr4mean
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
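+  # A hedged sketch of how the four clusters could be labelled automatically rather than by eye:
+  # correlate each cluster-mean map with a reference pattern of each regime ('pattern.NAOplus' is
+  # a hypothetical reference map, e.g. saved from a previous labelled run; this must be run before
+  # the rm() cleanup above):
+  ## cor(as.vector(pslwr1mean), as.vector(pattern.NAOplus))  # repeat for all 4 clusters x 4 patterns,
+  ## # then assign each cluster to the regime with the highest spatial correlation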
+  if(fields.name == ECMWF_S4.name){
+    cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array == 1, 2, sum)
+    wr2y <- apply(my.cluster.array == 2, 2, sum)
+    wr3y <- apply(my.cluster.array == 3, 2, sum)
+    wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days of one month of the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but measuring the maximum frequency among all the members:
+    wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+    # in this case, we must use psl.melted instead of psleuFull.
+    # Both are already in anomalies (psl.melted depends on the startdate and leadtime, psleuFull doesn't):
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean of each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
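+  # Optional sketch (commented out): a quick look at the interannual regime frequencies just
+  # computed in the S4 branch, using only base graphics:
+  ## plot(years, wr1y, type="b", col=1, ylim=c(0,1), xlab="year", ylab="frequency")
+  ## lines(years, wr2y, type="b", col=2); lines(years, wr3y, type="b", col=3)
+  ## lines(years, wr4y, type="b", col=4)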
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){ # loop over the years
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+if(wind_anomalies) {
+  # Compute the climatology and the anomalies (with the LOESS filter) for the sfcWind data, and draw the monthly anomalies,
+  # to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed in the reanalysis:
+
+  sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  sfcWind <- sfcWind366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March-December period, skipping the 29th of February
+  }
+
+  rm(sfcWind366)
+  gc()
+
+  sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+  sfcWindClimLoess <- sfcWindClimDaily
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      sfcWindClimLoess[,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(sfcWindClimDaily)
+  gc()
+
+  sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+
+  sfcWindAnom <- sfcWind - sfcWindClim2
+
+  rm(sfcWindClimLoess)
+  gc()
+
+  year.test <- 2016 # select only the anomalies of a chosen month and year, and average them:
+  month.test <- 10
+  pos.year.start <- year.test - year.start + 1
+
+  sfcWindAnomPeriod <- sfcWindAnom[pos.year.start,pos.period(year.test,month.test),,]
+
+  sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+  EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+  my.brks.var <- seq(-3,3,0.5)
+  values.to.plot2 <- c(-3,-2.5,-2,-1.5,-1,0,0.5,1,1.5,2,2.5,3)
+
+  my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+  fileoutput <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly.png")
+
+  png(filename=fileoutput,width=1000,height=1000)
+
+  par(fig=c(0, 1, 0.07, 0.98), new=TRUE)
+  PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU], my.brks.var[1]+0.001, tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+  #my.subset2 <- match(values.to.plot2, my.brks.var)
+  #legend2.cex <- 1.8
+
+  par(fig=c(0.03, 1, 0, 0.08), new=TRUE)
+  ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8) #, subset=my.subset2)
+
+  par(fig=c(0.95, 1, 0, 0.012), new=TRUE)
+  mtext("m/s", cex=1.8)
+
+  dev.off()
+
+  fileoutput2 <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly_fig2cat.png")
+
+  system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR' ", fileoutput," ", fileoutput2))
+
+  # to format it manually from the bash shell (you must run it from your workstation until the 'identify' command of ImageMagick is available on the cluster):
+  # sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / anomaly \nOctober / 2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_wind_anomaly.png NCEP_October_wind_anomaly_catalogue.png
+
+  # expression(~degree~C)
+
+}
+
+ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +NCEP.name <- "NCEP" + +rean <- ERAint #JRA55 #ERAint #NCEP # choose one of the reanalyses above from which to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +# choose a forecast system: +forecast <- ECMWF_S4 + +# put 'rean' to load pressure fields and var from a reanalysis, or put 'forecast' to load them from a forecast system: +fields <- rean #forecast + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2016 #2016 #2013 #2015 + +missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and the NAs are dealt with + +LOESS <- TRUE # to apply (or not) the LOESS filter to the climatology +running.cluster <- TRUE # add to the clustering also the daily SLP data of the two months closest to the target month (only for monthly analysis) +lat.weighting <- TRUE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +subdaily <- TRUE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available) + + + +sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
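+# A minimal sketch (illustrative only, assuming the 0.75-degree ERA-Interim grid with lon = seq(0, 359.25, 0.75)) of how a box crossing the Greenwich meridian like the one above is indexed further below, as the union of the two arcs [0, lon.max] and [lon.min, 360): +#   lon.ex <- seq(0, 359.25, 0.75) +#   pos.lon.ex <- c(1:tail(which(45 - lon.ex >= 0), 1), which(lon.ex - 274.5 >= 0)[1]:length(lon.ex)) +#   length(pos.lon.ex)  # -> 175 longitudes: indices 1:61 (0 to 45E) and 367:480 (274.5E to 359.25E, i.e. 85.5W to 0.75W)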
+ +# Only for reanalysis: +WR.period <- 1:12 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +res <- 0.75 # set the resolution you want to interpolate the seasonal psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name))) + +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script on the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min must be lower than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name=="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution as the reanalysis: +# (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of the chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +# Load psl data (interpolating it to the same reanalysis grid, if necessary): +cat("Loading data. Please wait...\n")
+ +if(fields.name == rean.name && subdaily == FALSE){ # Load daily psl data of all years in the reanalysis case: + psleuFull <-array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon)) + + psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + # remove leap days to have all arrays with the same dimensions: + cat("Removing leap days. Please wait...\n") + psleuFull[,,,,,] <- psleuFull366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March to December period, removing the 29th of February + } + + rm(psleuFull366) + gc() + + + if(running.cluster == TRUE && rean.name == "ERA-Interim" && WR.period == 10) { + # in this case, the last month of the last year might not have in /esnas the data for the 3rd month of the running cluster. Load() sets these values to NA, + # but the clustering analysis is not able to process NAs, so we have to remove the NAs for that month and replace them with the values of the previous year: + + psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,] + + + } + + # convert psl to daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + pslPeriodClimLoess <- pslPeriodClim + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean and subdaily == FALSE + + + + +if(fields.name == rean.name) { + # Compute climatology and anomalies (with LOESS filter) for sfcWind data, and draw monthly anomalies, if you want to compare the mean daily wind speed anomalies reconstructed by the weather regimes classification with those observed by the reanalysis: + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March to December period, removing the 29th of February + } + + rm(sfcWind366) + gc() + + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(sfcWindClimDaily) + gc() + + sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + + sfcWindAnom <- sfcWind - sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + year.test <- 2016 # select only anomalies of a chosen month and year, and average them: + month.test <- 10 + pos.year.start <- year.test - year.start + 1 + + sfcWindAnomPeriod <- sfcWindAnom[pos.year.start,pos.period(year.test,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE) + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2.5,-2,-1.5,-1,0,0.5,1,1.5,2,2.5,3) + + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + fileoutput <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly.png") + + png(filename=fileoutput,width=1000,height=1000) + + par(fig=c(0, 1, 0.07, 0.98), new=TRUE) + PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU], my.brks.var[1]+0.001, tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 1, 0, 0.08), new=TRUE) + ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8) #, subset=my.subset2) + + par(fig=c(0.95, 1, 0, 0.012), new=TRUE) + mtext("m/s", cex=1.8) + + dev.off() + + fileoutput2 <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_wind_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR' ", fileoutput," ", fileoutput2)) + + # to format it manually from the bash shell (you must run it from your workstation until the 'identify' command of ImageMagick is available on the cluster): + # sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / anomaly \nOctober / 2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_wind_anomaly.png NCEP_October_wind_anomaly_catalogue.png + + # expression(~degree~C) + +} + + +# Load 6-hourly psl data of all years in the reanalysis case: +if(fields.name == rean.name && subdaily == TRUE){ + psleuFull <-array(NA,c(1,1,n.years,365*4, n.pos.lat, n.pos.lon)) + + psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + # remove leap days to have all arrays with the same dimensions (indices count 6-hourly steps, 4 per day): + cat("Removing leap days. Please wait...\n") + psleuFull[,,,,,] <- psleuFull366[,,,1:(365*4),,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) psleuFull[,,y2,(59*4+1):(365*4),,] <- psleuFull366[,,y2,(60*4+1):(366*4),,] # take the March to December period, removing the 29th of February + } + + rm(psleuFull366) + gc() + + + if(running.cluster == TRUE && rean.name == "ERA-Interim" && WR.period == 10) { + # in this case, the last month of the last year might not have in /esnas the data for the 3rd month of the running cluster. Load() sets these values to NA, + # but the clustering analysis is not able to process NAs, so we have to remove the NAs for that month and replace them with the values of the previous year: + + psleuFull[,,36,(304*4+1):(334*4),,] <- psleuFull[,,35,(304*4+1):(334*4),,] # 6-hourly steps of days 305:334 + + + } + + # convert psl to daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + pslPeriodClimLoess <- pslPeriodClim + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:(365*4)) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean & subdaily == TRUE + + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at a time and all the lead times (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE) { + + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all of them in the third dimension of the array, one month at a time:
+ psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl to daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + + if(running.cluster == TRUE) { + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess1 <- pslPeriodClim1 + pslPeriodClimLoess2 <- pslPeriodClim2 + pslPeriodClimLoess3 <- pslPeriodClim3 + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes) + my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35) + pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1) + rm(my.data1, my.loess1) + gc() + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes) + my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35) + pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2) + rm(my.data2, my.loess2) + gc() + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes) + my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35) + pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3) + rm(my.data3, my.loess3) + gc() + } + } + + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3) + gc() + + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } # close if on running cluster + + } else { # in case of no LOESS: + + if(running.cluster == TRUE) { + # in this case, the climatology is measured FOR EACH MONTH INDEPENDENTLY, instead of using a seasonal value: + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + # expand each monthly climatology over members and years, and bind the three months along the startdate dimension, as in the LOESS branch: + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClim1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClim2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClim3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + } + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClimDos + rm(pslPeriodClimDos) + gc() + +} # close if on fields.name == forecast.name + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert the geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP from Pa to hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R") + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + + #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e. winter) + if(running.cluster == TRUE) { + pslPeriod <- psleuFull[1,1,,pos.month.extended(2001,p),,] # select all days in the period of 3 months centered on the target month p + } else { + pslPeriod <- psleuFull[1,1,,pos.period(2001,p),,] # select only days in the chosen period
(i.e. winter) + } + + # weight the pressure fields based on latitude (apply it to the anomalies, not to the absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Preformatting data for clustering. Please wait......\n") + # this function uses a lot of memory! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in the case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + if(running.cluster == TRUE && p < 13) { + # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-month running period, + # generalizing the command below (which at present only works for the month of January and lead time 0): + + # Select DJF (lead time 0) for our case study, excluding the 29th of February): + pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE] + + } else { + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find for which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # remove the 29th of February to have the same n. of elements for all years + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + } + + # weight the pressure fields based on latitude (apply it to the anomalies, not to the absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!! + rm(lat.weighted.array) + gc() + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + + #save(psl.melted,file=paste0(workdir,"/psl_melted.RData")) + + cat("Preformatting data. Please wait......\n") + # This function uses a lot of memory!
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData")) + #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + #save(my.cluster,file=paste0(workdir,"/my_cluster.RData")) + + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame into an array uses much less memory than acast, but is a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!) + + days.period <- list() # get which days inside psleuFull belong to the chosen period + days.period[[p]] <- integer(0) # initialize the list element, otherwise c() below fails on an empty list + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 point out of 16 + pslcut <- psl.kmeans[,my.seq] + + my.PCA <- princomp(pslcut,cor=FALSE) + tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the PCs that explain at least 80% of the variance + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + gc() + + #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +#} # close for on 'p' +
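+# A self-contained toy sketch (hypothetical data, not from the original script) of the clustering step above: rows of the input matrix are days, columns are grid points, and kmeans() with 4 centers returns one regime label per day: +#   set.seed(1) +#   toy <- matrix(rnorm(100 * 12), nrow = 100, ncol = 12)   # 100 'days' x 12 'grid points' +#   toy.cluster <- kmeans(toy, centers = 4, iter.max = 100, nstart = 30) +#   table(toy.cluster$cluster)                              # number of days assigned to each of the 4 regimes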
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#my.grid<-paste0('r',n.lon,'x',n.lat) +#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear') + +# measure regime anomalies: + +#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + if(fields.name == rean.name){ + + if(running.cluster == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p + n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months + + days.month <- days.month.new <- days.month.full <- c() + days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + n.days.period*(y-1) + days.month.full <- c(days.month.full, days.month.new) + #print(days.month.new) + #print(days.month) + } + + cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of the cluster series inside the target month p + + } else { + cluster.sequence <- my.cluster$cluster + } + + if(sequences){ # it works only for rean data!!! + # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster$cluster * sequ + rm(wrdiff, sequ) + } + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <- wr4y <- c() + np <- n.days.in.a.period(p,2001) + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc. + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/np + wr2y <- wr2y/np + wr3y <- wr3y/np + wr4y <- wr4y/np + + gc() + + pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon)) + + if(running.cluster == TRUE){ + pslmat.new <- pslmat + pslmat <- pslmat.new[days.month.full,,] + gc() + } + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day, to save for the measure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(my.cluster, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + # immediately save the plots of the ERA-Interim monthly regime anomalies with the running cluster, rather than loading them in the next script: + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + map1 <- pslwr1mean + map2 <- pslwr2mean + map3 <- pslwr3mean + map4 <- pslwr4mean + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. Please wait...\n") + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array == 1, 2, sum) + wr2y <- apply(my.cluster.array == 2, 2, sum) + wr3y <- apply(my.cluster.array == 3, 2, sum) + wr4y <- apply(my.cluster.array == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days of one month of the cluster time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequency among all the members: + wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, we must use psl.melted instead of psleuFull. Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull does not) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + gc() + + #for(a in 1:2){ + # for(b in 1:3){ + # for(c in 1:4){ + # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,] + # } + # } + #} + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day, to save for the measure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps): + save(my.cluster, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + days.period[[p]] <- integer(0) # initialize the list element, otherwise c() below fails on an empty list + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period + + wr1y <- wr2y <- wr3y <- wr4y <- c() + for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + } + + cat("Finished!\n") +} # close the for loop on 'p' + + + + + + + + + + + + + + + + + + + + + diff --git a/old/weather_regimes_v41.R b/old/weather_regimes_v41.R new file mode 100644 index 0000000000000000000000000000000000000000..80a5a9aa4c121e0a9905beb4951a78bffd27b938 --- /dev/null +++ b/old/weather_regimes_v41.R @@ -0,0 +1,1400 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system +# +# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder. +# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R +# +# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(abind) +library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken! + +SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine + +if(SMP){ + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated +} else { + source('/home/Earth/ncortesi/scripts/Rfunctions.R') + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated +} + +# module load NCO # : load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!!!!!
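+# Usage note (added for illustration; it follows from the commandArgs() parsing further below): +#   Rscript weather_regimes_v41.R        # no arguments: use startM, leadM and WR.period as set inside the script +#   Rscript weather_regimes_v41.R 2      # one argument: reanalysis, cluster only February +#   Rscript weather_regimes_v41.R 1 0    # two arguments: forecast, start month January, lead month 0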
+ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +NCEP.name <- "NCEP" + +rean <- NCEP #JRA55 #JRA55 #ERAint #NCEP # choose one of the reanalyses above from which to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +# choose a forecast system: +forecast <- ECMWF_S4 + +# put 'rean' to load pressure fields and var from a reanalysis, or put 'forecast' to load them from a forecast system: +fields <- rean #forecast + +psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2016 #2016 #2013 #2015 + +LOESS <- TRUE # to apply (or not) the LOESS filter to the climatology +running.cluster <- FALSE # add to the clustering also the daily SLP data of the two months closest to the target month (only for monthly analysis) +lat.weighting <- TRUE # set it to TRUE to weight psl data on latitude before applying the cluster analysis +subdaily <- FALSE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available) + +monthly_anomalies <- TRUE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below +year.chosen <- 2016 +month.chosen <- 1:12 +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" + +missing.forecasts <- TRUE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and the NAs are dealt with +sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
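+# Note on the lat.weighting option above (illustrative, not from the original script): the anomalies are multiplied by sqrt(cos(lat)) before the k-means, so that each grid point's squared contribution to the Euclidean distance scales with cos(lat), i.e. with the grid-cell area: +#   lat.ex <- c(81, 54, 27)              # degrees north +#   round(cos(lat.ex * pi/180)^0.5, 3)   # -> 0.396 0.767 0.944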
+ +# Only for reanalysis: +WR.period <- 1:12 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load + +res <- 0.75 # set the resolution you want to interpolate the seasonal psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name))) + +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 grid is the same as the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
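+
+# A minimal sketch of the longitude wrap performed by pos.lon above (the numbers are
+# hypothetical, assuming a 1-degree grid with longitudes 0..359 and an area spanning
+# 86W-45E): the segment east of Greenwich (indices 1:46, i.e. 0..45E) is concatenated
+# with the segment west of it (indices 275:360, i.e. 274E..359E = 86W..1W):
+# lon.demo <- 0:359
+# pos.lon.demo <- c(1:46, 275:360)
+# lon.demo[pos.lon.demo] # 0..45 followed by 274..359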
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name && subdaily == FALSE){ # Load daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  # remove leap days to have all arrays with the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # drop the 29th of February by shifting the March-December block one day back
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  if(running.cluster == TRUE && rean.name == "ERA-interim" && year.end == 2016) {
+    # October is the last month with data available in the last year, so the November data needed
+    # as 3rd month of the running cluster is missing. Load() sets these values to NA, but the
+    # clustering analysis cannot process NAs, so we replace that month with the values of the previous year:
+    psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,]
+  }
+
+  # convert psl into daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- array(NA, dim(pslPeriodClim))
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else { # keep the raw daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean.name && subdaily == FALSE
+
+
+# Load 6-hourly psl data of all years in the reanalysis case:
+if(fields.name == rean.name && subdaily == TRUE){
+
+  #sdates <- as.vector(sapply(year.start:year.end, function(x) paste0(x, sprintf("%02d", 1:12), '01')))
+  #my.exp <- list(path=fields)
+  #psleuFull366 <- Load(var = psl, exp = list(my.exp), NULL, sdates, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+  #dim(data$obs) <- c(dim(data$obs)[1:2], 1, dim(data$obs)[3]*dim(data$obs)[4], dim(data$obs)[5:6])
+
+  my.exp <- list(path=fields)
+
+  # Load January data:
+  psleu1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  # Load February data (it automatically discards the 29th of February):
+  psleu2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
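+
+  # Side note on the leap-day indexing used in this script (a sketch, not executed):
+  # in a 365-day calendar, day 60 is the 1st of March, while in a 366-day calendar it is
+  # the 29th of February. Hence copying positions 61:366 of a leap year onto positions
+  # 60:365 drops the 29th of February while keeping March-December aligned across years:
+  # 31 + 28 + 1 # 60: the 1st of March in a 365-day year
+  # 31 + 29 + 1 # 61: the 1st of March in a leap year
+  # length(60:365) == length(61:366) # TRUE: both spans cover the 306 days Mar 1 - Dec 31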
+
+  # Load March to December data:
+  psleu3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0301'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0401'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu5 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0501'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu6 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0601'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu7 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0701'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu8 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0801'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu9 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0901'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu10 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1001'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu11 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu12 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  if(running.cluster == TRUE && rean.name == "ERA-interim") {
+    # in this case, the last month of the last year might not have the data for the 3rd month of the
+    # running cluster available in /esnas. Load() sets these values to NA, but the clustering analysis
+    # cannot process NAs, so we replace that month with the values of the previous year:
+    psleu11[,,36,,,] <- psleu11[,,35,,,]
+    psleu12[,,36,,,] <- psleu12[,,35,,,]
+
+  }
+
+  psleuFull <- abind(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12, along=4)
+  rm(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12)
+  gc()
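+
+  # The twelve monthly Loads above could equivalently be written as a loop (a sketch with
+  # the same arguments, kept as a comment to leave the original flow untouched):
+  # psleu <- lapply(sprintf("%02d", 1:12), function(mm)
+  #   Load(var = psl, exp = list(list(path=fields)), obs = NULL,
+  #        sdates = paste0(year.start:year.end, mm, '01'), storefreq = 'daily',
+  #        output = 'lonlat', latmin = lat.min, latmax = lat.max,
+  #        lonmin = lon.min, lonmax = lon.max)$mod)
+  # psleuFull <- do.call(abind, c(psleu, list(along=4)))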
+
+  # convert psl into daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    # separate the psl data into the four daily time steps (00:00, 06:00, 12:00, 18:00):
+    pslPeriodClim1 <- pslPeriodClim[1,1,seq(1,1460,4),,]
+    pslPeriodClim2 <- pslPeriodClim[1,1,seq(2,1460,4),,]
+    pslPeriodClim3 <- pslPeriodClim[1,1,seq(3,1460,4),,]
+    pslPeriodClim4 <- pslPeriodClim[1,1,seq(4,1460,4),,]
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClimLoess1 <- array(NA, dim(pslPeriodClim1))
+    pslPeriodClimLoess2 <- array(NA, dim(pslPeriodClim2))
+    pslPeriodClimLoess3 <- array(NA, dim(pslPeriodClim3))
+    pslPeriodClimLoess4 <- array(NA, dim(pslPeriodClim4))
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data1 <- data.frame(ens.mean=pslPeriodClim1[,i,j], hourly=1:(1460/4))
+        my.loess1 <- loess(ens.mean ~ hourly, my.data1, span=0.35)
+        pslPeriodClimLoess1[,i,j] <- predict(my.loess1)
+
+        my.data2 <- data.frame(ens.mean=pslPeriodClim2[,i,j], hourly=1:(1460/4))
+        my.loess2 <- loess(ens.mean ~ hourly, my.data2, span=0.35)
+        pslPeriodClimLoess2[,i,j] <- predict(my.loess2)
+
+        my.data3 <- data.frame(ens.mean=pslPeriodClim3[,i,j], hourly=1:(1460/4))
+        my.loess3 <- loess(ens.mean ~ hourly, my.data3, span=0.35)
+        pslPeriodClimLoess3[,i,j] <- predict(my.loess3)
+
+        my.data4 <- data.frame(ens.mean=pslPeriodClim4[,i,j], hourly=1:(1460/4))
+        my.loess4 <- loess(ens.mean ~ hourly, my.data4, span=0.35)
+        pslPeriodClimLoess4[,i,j] <- predict(my.loess4)
+
+      }
+    }
+
+    rm(my.data1, my.data2, my.data3, my.data4, my.loess1, my.loess2, my.loess3, my.loess4)
+    rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClim4)
+    gc()
+
+    # interleave the four smoothed series back into one 6-hourly climatology:
+    s1 <- seq(1,1460,4)
+    s2 <- seq(2,1460,4)
+    s3 <- seq(3,1460,4)
+    s4 <- seq(4,1460,4)
+
+    pslPeriodClimLoess <- array(NA,c(365*4,dim(pslPeriodClimLoess1)[2:3]))
+
+    for(day in 1:365){
+      pslPeriodClimLoess[s1[day],,] <- pslPeriodClimLoess1[day,,]
+      pslPeriodClimLoess[s2[day],,] <- pslPeriodClimLoess2[day,,]
+      pslPeriodClimLoess[s3[day],,] <- pslPeriodClimLoess3[day,,]
+      pslPeriodClimLoess[s4[day],,] <- pslPeriodClimLoess4[day,,]
+    }
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 1, n.years)
+
+  } else { # keep the raw daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull[1,1,,,,] - pslPeriodClim2
+
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean.name && subdaily == TRUE
+
+
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load the data for only 1 start month at a time and all the lead times (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+      if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+    }
+
+  } else {
+    years <- year.start:year.end
+  }
+
+  n.years.full <- length(years)
+
+  if(running.cluster == TRUE) {
+
+    start.month1 <- ifelse(start.month == 1, 12, start.month-1)
+    if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1}
+
+    start.month2.char <- start.month.char
+
+    start.month3 <- ifelse(start.month == 12, 1, start.month+1)
+    if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3}
+
+    # load three months of data, inserting all of them in the third dimension of the array, one month at a time:
+    psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+    gc()
+
+    psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+    gc()
+
+    psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+    gc()
+
+  } else {
+
+    psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
+
+  }
+
+  if(LOESS == TRUE){
+    # convert psl into daily anomalies with the LOESS filter:
+    cat("Calculating anomalies. Please wait...\n")
+
+    if(running.cluster == TRUE) {
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClimLoess1 <- pslPeriodClim1
+      pslPeriodClimLoess2 <- pslPeriodClim2
+      pslPeriodClimLoess3 <- pslPeriodClim3
+
+      for(i in 1:n.pos.lat){
+        for(j in 1:n.pos.lon){
+          my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes)
+          my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35)
+          pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1)
+          rm(my.data1, my.loess1)
+          gc()
+
+          my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes)
+          my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35)
+          pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2)
+          rm(my.data2, my.loess2)
+          gc()
+
+          my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes)
+          my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35)
+          pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3)
+          rm(my.data3, my.loess3)
+          gc()
+        }
+      }
+
+      rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)
+      gc()
+
+      # (the dimension bookkeeping of these InsertDim() calls is sketched after this block)
+      pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members)
+
+      pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+      rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClimLoess <- pslPeriodClim
+      for(i in 1:n.pos.lat){
+        for(j in 1:n.pos.lon){
+          my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+          my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+          pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+        }
+      }
+
+      rm(pslPeriodClim)
+      gc()
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    } # close if on running cluster
+
+  } else { # in case of no LOESS:
+
+    if(running.cluster == TRUE) {
+      # in this case, the climatology is measured FOR EACH MONTH INDEPENDENTLY, instead of using a seasonal value:
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      # replicate each monthly climatology over members and years, then concatenate the three
+      # months along the startdate dimension, as in the LOESS branch above:
+      pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClim1, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClim2, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClim3, 2, n.years.full), 2, n.members)
+
+      pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+      rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+    }
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClimDos
+  rm(pslPeriodClimDos)
+  gc()
+
+} # close if on fields.name == ECMWF_S4.name
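+
+# Dimension bookkeeping for the InsertDim() calls in the S4 block above (a sketch; the
+# dimension order follows the $mod output of Load()): starting from a climatology with
+# dims c(dataset, leadtime, lat, lon),
+# InsertDim(clim, 2, n.years.full) # -> c(dataset, year, leadtime, lat, lon)
+# InsertDim(..., 2, n.members) # -> c(dataset, member, year, leadtime, lat, lon)
+# which matches the dimension order of psleuFull, so the same climatology is replicated
+# over every member and year before the element-wise subtraction.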
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case.
+  # The startdate sequence and its length must be defined before allocating the array:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only the days in the chosen period (e.g. winter)
+    if(running.cluster) {
+      if(subdaily){
+        my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4))
+        if(p == 1) my.hours <- c(1337:1460,1:236)
+        if(p == 12) my.hours <- c(1217:1460,1:124)
+
+        pslPeriod <- psleuFull[,my.hours,,] # select all the days in the period of 3 months centered on the target month p
+
+      } else {
+        pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all the days in the period of 3 months centered on the target month p
+      }
+
+    } else {
+
+      if(subdaily){
+        my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4))
+        pslPeriod <- psleuFull[,my.hours,,]
+      } else {
+        pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only the days in the chosen period (e.g. winter)
+      }
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      if(!running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month(2001,p))), 1, n.years)
+      if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+
+      pslPeriod <- pslPeriod * lat.weighted.array
+
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    gc()
+    cat("Preformatting data for clustering. Please wait...\n")
+    psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+
+    cat("Preformatting data for clustering. Please wait......\n")
+    # this function eats a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+    #gc()
+
+    # select period data from psleuFull to use it as input of the cluster analysis:
+    #psl.kmeans <- pslPeriod
+    #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in case of S4 we don't need to select anything because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+    if(running.cluster == TRUE && p < 13) {
+      # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting the 3-month running period automatically,
+      # generalizing the command below (which at present only works for the month of January and lead time 0):
+
+      # Select DJF (lead time 0) for our case study, excluding the 29th of February:
+      pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE]
+
+    } else {
+
+      # select data only for the start month and lead month to study:
+      cat("Extracting data. Please wait...\n")
+      chosen.month <- start.month + lead.month # find from which month we want to select data
+      if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+      # remove the 29th of February to have the same number of elements for all years:
+      i <- 0
+      for(y in years){
+        i <- i + 1
+        leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+        leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+        #leadtime.max <- 61
+
+        if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1
+        int.leadtimes <- leadtime.min:leadtime.max
+        num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+        # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+
+        if(y == years[1]) {
+          pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+        } else {
+          pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+        }
+      }
+
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!!
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+
+    #save(psl.melted,file=paste0(workdir,"/psl_melted.RData"))
+
+    cat("Preformatting data. Please wait......\n")
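+    # What the melt()/acast() pair used here does, on a toy example (a sketch using
+    # reshape2; the real call below builds one row per member/year/day combination and
+    # one column per grid point):
+    # arr <- array(rnorm(2*3*4), c(2,3,4)) # 2 days x 3 lats x 4 lons
+    # m <- melt(arr, varnames=c("Day","Lat","Lon")) # long format: one row per value
+    # acast(m, Day ~ Lat + Lon) # back to a matrix: 2 rows x 12 columns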
+    # This function eats a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
+    #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData"))
+    #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+    #save(my.cluster,file=paste0(workdir,"/my_cluster.RData"))
+
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this loop to convert a data.frame into an array uses much less memory than acast, but it is a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- c() # initialize the p-th element before appending to it
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # subsample the grid, keeping only 1 point out of every 16, to speed up the PCA
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA <- princomp(pslcut,cor=FALSE)
+    tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep based on the cumulative sum of their explained variance
+    n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep only the first PCs that together explain at least the fraction 'variance.explained' of the total variance
+    my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+    gc()
+
+    #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+    #if(fields.name == rean.name){
+    #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+    #}
+
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+
+#} # close for on 'p'
+
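+# A toy illustration of the kmeans() calls above (base R only; the values are random):
+# rows are days (or member/year/day combinations), columns are grid points, and
+# $cluster returns one regime label per row, i.e. the daily regime sequence:
+# toy <- matrix(rnorm(100*6), nrow=100) # 100 days x 6 grid points
+# toy.km <- kmeans(toy, centers=4, iter.max=100, nstart=30)
+# head(toy.km$cluster) # e.g. 2 2 4 1 1 3
+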
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if(running.cluster == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+      n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1]) -1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      # in this case, we are not selecting days but 6-hourly intervals:
+      if(subdaily == TRUE) days.month.full <- sort(c(days.month.full*4, days.month.full*4+1, days.month.full*4+2, days.month.full*4+3))
+
+      # select only the days of the cluster series inside the target month p:
+      cluster.sequence <- my.cluster$cluster[days.month.full]
+
+    } else {
+
+      cluster.sequence <- my.cluster$cluster
+    }
+
+    # convert the cluster sequence from 6-hourly to daily:
+    if(subdaily){
+      type <- c()
+
+      for(day in 1:(length(cluster.sequence)/4)){
+        #day <- ceiling(hour/4)
+        hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)]
+
+        t1 <- length(which(hourly == 1))
+        t2 <- length(which(hourly == 2))
+        t3 <- length(which(hourly == 3))
+        t4 <- length(which(hourly == 4))
+        tt <- c(t1,t2,t3,t4)
+
+        # if all the 4 time steps belong to the same regime, assign it to this day:
+        if(length(unique(hourly)) == 1) type[day] <- hourly[1]
+
+        # if there are two different regimes, check if one has a higher frequency:
+        if(length(unique(hourly)) == 2){
+          if(any(tt == 3)){ # if 3 of the 4 time steps belong to the same weather regime, assign this day to it
+            type[day] <- which(tt == 3)
+          } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrarily assign the regime occurring at 12:00 of that day
+            type[day] <- hourly[3]
+          }
+        }
+
+        # if there are three different regimes, assign it to the only possible regime with 2 time steps in that day:
+        if(length(unique(hourly)) == 3) type[day] <- which(tt == 2)
+
+        # if there are four different regimes (a very rare event!), assign it to the regime occurring at 12:00 of that day:
+        if(length(unique(hourly)) == 4) type[day] <- hourly[3]
+
+      } # close for on day
+
+    } # close if on subdaily
+
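+    # Worked examples of the 6-hourly -> daily assignment above:
+    # hourly = c(2,2,2,2) -> all four time steps agree -> day assigned to regime 2
+    # hourly = c(1,1,1,3) -> three steps agree -> day assigned to regime 1
+    # hourly = c(1,3,1,3) -> 2-2 tie -> take the 12:00 step (the third one) -> regime 1
+    # hourly = c(1,2,2,4) -> three regimes -> take the only one with two steps -> regime 2
+    # hourly = c(1,2,3,4) -> four regimes -> take the 12:00 step -> regime 3
+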
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days.
+      # Warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE):
+      wrdiff <- diff(my.cluster$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+
+    mod.subdaily <- ifelse(subdaily,4,1)
+    np <- n.days.in.a.period(p,1)*mod.subdaily
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.:
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+
+    # measure the regime anomalies:
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster == TRUE){
+      pslmat.new <- pslmat
+      pslmat <- pslmat.new[days.month.full,,]
+      gc()
+    }
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, to save for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1, wr2, wr3, wr4, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    # immediately save the plots of the ERA-Interim monthly regime anomalies with the running cluster, instead of loading them in the next script:
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+    if(psl == "g500"){
+      my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+      my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+      values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+    }
+
+    if(psl == "psl"){
+      my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+      my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+      values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+    }
+    my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+    my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+    map1 <- pslwr1mean
+    map2 <- pslwr2mean
+    map3 <- pslwr3mean
+    map4 <- pslwr4mean
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  if(fields.name == ECMWF_S4.name){
+    cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array == 1, 2, sum)
+    wr2y <- apply(my.cluster.array == 2, 2, sum)
+    wr3y <- apply(my.cluster.array == 3, 2, sum)
+    wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/(num.leadtimes*n.members) # here num.leadtimes is the number of days of one month of the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but to measure the maximum frequency across all the members:
+    wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+    # in this case, we must use psl.melted instead of psleuFull; both are already expressed as anomalies
+    # (psl.melted depends on the startdate and leadtime, psleuFull does not):
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    # for(b in 1:3){
+    #  for(c in 1:4){
+    #   pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #  }
+    # }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, to save for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, cluster.sequence, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, wr1, wr2, wr3, wr4, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list() # get which days inside psleuFull belong to the chosen period
+    days.period[[p]] <- c() # initialize the p-th element before appending to it
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+if(monthly_anomalies) {
+  # Compute the climatology and the anomalies (with the LOESS filter) of the sfcWind data and draw the monthly anomalies,
+  # to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis:
+
+  sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  sfcWind <- sfcWind366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # drop the 29th of February by shifting the March-December block one day back
+  }
+
+  rm(sfcWind366)
+  gc()
+
+  sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+  sfcWindClimLoess <- sfcWindClimDaily
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      sfcWindClimLoess[,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(sfcWindClimDaily)
+  gc()
+
+  sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+
+  sfcWindAnom <- sfcWind - sfcWindClim2
+
+  rm(sfcWindClimLoess)
+  gc()
+
+  # same as above, but computing the climatology and the anomalies (with the LOESS filter) of the psl data, to draw the monthly slp anomalies:
+  slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  slp <- slp366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # drop the 29th of February by shifting the March-December block one day back
+  }
+
+  rm(slp366)
+  gc()
+
+  slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T)
+
+  slpClimLoess <- slpClimDaily
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      slpClimLoess[,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(slpClimDaily)
+  gc()
+
+  slpClim2 <- InsertDim(slpClimLoess, 1, n.years)
+
+  slpAnom <- slp - slpClim2
+
+  rm(slpClimLoess)
+  gc()
+
+  if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa
+
+  EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+  my.brks.var <- seq(-3,3,0.5)
+  my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+  # same, but for slp:
+  my.brks.var2 <- c(seq(-21,-1,2),0,seq(1,21,2))
+  my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors
+  my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white"
+
+  # save all monthly anomaly maps:
+  for(year.test in year.chosen){
+    for(month.test in month.chosen){
+      #year.test <- 2016; month.test <- 10 # for the debug
+
+      pos.year.test <- year.test - year.start + 1
+
+      # wind anomaly for the chosen year and month:
+      sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,]
+
+      sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+      # psl anomaly for the chosen year and month:
+      slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,]
+
+      slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+      fileoutput <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly.png")
+
+      png(filename=fileoutput,width=2000,height=1000)
+
+      par(fig=c(0, 0.5, 0.08, 0.98), new=TRUE)
+      PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+      #my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.03, 0.5, 0.015, 0.09), new=TRUE)
+      ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+
+      par(fig=c(0.47, 0.5, 0, 0.028), new=TRUE)
+      mtext("m/s", cex=1.8)
+
+      par(fig=c(0.5, 1, 0.1, 0.98), new=TRUE)
+
+      PlotEquiMap2(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean[,EU]), contours.lty="F1FF1F", cex.lab=1)
+
+      # you could almost use the normal PlotEquiMap function below; it only needs the black line for anomaly=0 and a smaller size of the contour labels:
+      # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5)
+
+      ##my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.5, 1, 0, 0.09), new=TRUE)
+      #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+      ColorBar(my.brks.var2, cols=my.cols.var2, vert=FALSE, triangle_ends=c(FALSE,FALSE), label_scale=1.8, subsampleg=2) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2)
+
+      par(fig=c(0.97, 1, 0, 0.027), new=TRUE)
+      mtext("hPa", cex=1.8)
+
+      par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
+ 
mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(p,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load regime names: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) 
+ + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + fileoutput.test <- paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1000,height=2000) + + par(fig=c(0, 0.5, 0.77, 0.97), new=TRUE) + PlotEquiMap2(rescale(imp.test1[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.54, 0.74), new=TRUE) + PlotEquiMap2(rescale(imp.test2[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.31, 0.51), new=TRUE) + PlotEquiMap2(rescale(imp.test3[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.08, 0.28), new=TRUE) + PlotEquiMap2(rescale(imp.test4[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + par(fig=c(0,0.5,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.5, 0.015, 0.06), new=TRUE) + ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + + par(fig=c(0.48, 0.5, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.5, 1, 0.77, 0.97), new=TRUE) + PlotEquiMap2(rescale(psl.test1[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.54, 0.74), new=TRUE) + PlotEquiMap2(rescale(psl.test2[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.31, 0.51), new=TRUE) + PlotEquiMap2(rescale(psl.test3[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.08, 0.28), new=TRUE) + PlotEquiMap2(rescale(psl.test4[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4[,EU]), contours.lty="F1FF1F", cex.lab=1) + + par(fig=c(0.5,1,0.955,0.975), new=TRUE) + 
mtext(paste0(orden[1], " sea level pressure anomaly "), font=2, cex=2)
+      par(fig=c(0.5,1,0.725,0.745), new=TRUE)
+      mtext(paste0(orden[2], " sea level pressure anomaly "), font=2, cex=2)
+      par(fig=c(0.5,1,0.495,0.515), new=TRUE)
+      mtext(paste0(orden[3], " sea level pressure anomaly "), font=2, cex=2)
+      par(fig=c(0.5,1,0.265,0.285), new=TRUE)
+      mtext(paste0(orden[4], " sea level pressure anomaly "), font=2, cex=2)
+
+      par(fig=c(0.5, 0.99, 0.015, 0.06), new=TRUE)
+      #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+      ColorBar(my.brks.var2, cols=my.cols.var2, vert=FALSE, triangle_ends=c(FALSE,FALSE), label_scale=1.8, subsampleg=2)
+
+      par(fig=c(0.971, 0.997, 0.01, 0.044), new=TRUE)
+      mtext("hPa", cex=1.6)
+
+      par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
+      mtext("0", cex=1.8)
+
+      dev.off()
+
+      # same image formatted for the catalogue:
+      fileoutput2.test <- paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.test," ", fileoutput2.test))
+
+    } # close for on year.test
+  } # close for on month.test
+
+} # close if on monthly_anomalies
+
+
diff --git a/old/weather_regimes_v41.R~ b/old/weather_regimes_v41.R~
new file mode 100644
index 0000000000000000000000000000000000000000..49338eca674e7098a422b80f832d323fadcb1ffe
--- /dev/null
+++ b/old/weather_regimes_v41.R~
@@ -0,0 +1,1404 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken!
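+# e.g., a quick reinstall from the R console (assuming a default CRAN mirror is configured):
+#remove.packages("reshape2"); install.packages("reshape2")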
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for the var data:
+ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- NCEP #JRA55 #ERAint # choose one of the reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# set to 'rean' to load the pressure fields and var from a reanalysis, or to 'forecast' to load them from a forecast system:
+fields <- rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1982 #1994
+year.end <- 2016 #2013 #2015
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+running.cluster <- FALSE # add to the clustering also the daily SLP data of the two months closest to the target month (only for monthly analysis)
+lat.weighting <- TRUE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+subdaily <- FALSE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available)
+
+monthly_anomalies <- TRUE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below
+year.chosen <- 2016
+month.chosen <- 1:12
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+
+missing.forecasts <- TRUE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so only the available years are loaded and NAs are dealt with
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, which is equal to 80%)
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 81 #80 #70
+lat.min <- 27 #30 #20
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
+
+# Only for reanalysis:
+WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for seasonal forecasts:
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+res <- 0.75 # the resolution to interpolate the seasonal psl and var data to when loading it (i.e.: set to 0.75 to use the same res as ERA-Interim)
+            # interpolation method is BILINEAR.
+
+# Only for monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name)))
+
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month for which we want to perform the clustering;
+# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth.
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 grid is the same as the chosen grid (typically that of ERA-Interim), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name && subdaily == FALSE){ # Load daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA, c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  # remove leap days (29th of February) to have all arrays with the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+     y2 <- y - year.start + 1
+     if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March to December period, removing the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  if(running.cluster == TRUE && rean.name == "ERA-Interim" && year.end == 2016) {
+    # in the case of October, the last month of the last year of available data, the November data for the 3rd month of the running cluster is not available.
+    # Load() sets these values to NA, but the clustering analysis cannot process NAs,
+    # so we have to remove the NAs for that month and replace them with the values of the previous year:
+    psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,]
+  }
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- array(NA, dim(pslPeriodClim))
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else { # keep the daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean.name && subdaily == FALSE
+
+
+# Load 6-hourly psl data of all years in the reanalysis case:
+if(fields.name == rean.name && subdaily == TRUE){
+
+  #sdates <- as.vector(sapply(year.start:year.end, function(x) paste0(x, sprintf("%02d", 1:12), '01')))
+  #my.exp <- list(path=fields)
+  #psleuFull366 <- Load(var = psl, exp = list(my.exp), NULL, sdates, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+  #dim(data$obs) <- c(dim(data$obs)[1:2], 1, dim(data$obs)[3]*dim(data$obs)[4], dim(data$obs)[5:6])
+
+  my.exp <- list(path=fields)
+
+  # Load January data:
+  psleu1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  # Load February data (it automatically discards the 29th of February):
+  psleu2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  # Load March data:
+  psleu3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0301'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0401'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu5 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0501'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu6 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0601'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu7 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0701'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu8 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0801'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu9 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0901'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu10 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1001'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu11 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu12 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  if(running.cluster == TRUE && rean.name == "ERA-Interim") {
+    # in this case, the last month of the last year might not have the data for the 3rd month of the running cluster in /esnas. Load() sets these values to NA,
+    # but the clustering analysis cannot process NAs, so we have to remove the NAs for that month and replace them with the values of the previous year:
+    psleu11[,,36,,,] <- psleu11[,,35,,,]
+    psleu12[,,36,,,] <- psleu12[,,35,,,]
+
+  }
+
+  psleuFull <- abind(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12, along=4)
+  rm(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12)
+  gc()
+
+  # convert psl to daily anomalies with the LOESS filter:
+  cat("Calculating anomalies.
Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + # separate psl data in the four hours of the day ( 0.00, 6.00, 12.00, 18.00) + pslPeriodClim1 <- pslPeriodClim[1,1,seq(1,1460,4),,] + pslPeriodClim2 <- pslPeriodClim[1,1,seq(2,1460,4),,] + pslPeriodClim3 <- pslPeriodClim[1,1,seq(3,1460,4),,] + pslPeriodClim4 <- pslPeriodClim[1,1,seq(4,1460,4),,] + + rm(pslPeriodClim) + gc() + + pslPeriodClimLoess1 <- array(NA, dim(pslPeriodClim1)) + pslPeriodClimLoess2 <- array(NA, dim(pslPeriodClim2)) + pslPeriodClimLoess3 <- array(NA, dim(pslPeriodClim3)) + pslPeriodClimLoess4 <- array(NA, dim(pslPeriodClim4)) + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[,i,j], hourly=1:(1460/4)) + my.loess1 <- loess(ens.mean ~ hourly, my.data1, span=0.35) + pslPeriodClimLoess1[,i,j] <- predict(my.loess1) + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[,i,j], hourly=1:(1460/4)) + my.loess2 <- loess(ens.mean ~ hourly, my.data2, span=0.35) + pslPeriodClimLoess2[,i,j] <- predict(my.loess2) + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[,i,j], hourly=1:(1460/4)) + my.loess3 <- loess(ens.mean ~ hourly, my.data3, span=0.35) + pslPeriodClimLoess3[,i,j] <- predict(my.loess3) + + my.data4 <- data.frame(ens.mean=pslPeriodClim4[,i,j], hourly=1:(1460/4)) + my.loess4 <- loess(ens.mean ~ hourly, my.data4, span=0.35) + pslPeriodClimLoess4[,i,j] <- predict(my.loess4) + + } + } + + rm(my.data1, my.data2, my.data3, my.data4, my.loess1, my.loess2, my.loess3, my.loess4) + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClim4) + gc() + + s1 <- seq(1,1460,4) + s2 <- seq(2,1460,4) + s3 <- seq(3,1460,4) + s4 <- seq(4,1460,4) + + pslPeriodClimLoess <- array(NA,c(365*4,dim(pslPeriodClimLoess1)[2:3])) + + for(day in 1:365){ + pslPeriodClimLoess[s1[day],,] <- pslPeriodClimLoess1[day,,] + pslPeriodClimLoess[s2[day],,] <- pslPeriodClimLoess2[day,,] + pslPeriodClimLoess[s3[day],,] <- pslPeriodClimLoess3[day,,] + pslPeriodClimLoess[s4[day],,] <- pslPeriodClimLoess4[day,,] + } + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 1, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull[1,1,,,,] - pslPeriodClim2 + + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean & subdaily == TRUE + + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at time and all the lead times (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- 
as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE) { + + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all them in the third dimension of the array, one month at time: + psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. 
Please wait...\n")
+
+    if(running.cluster == TRUE) {
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClimLoess1 <- pslPeriodClim1
+      pslPeriodClimLoess2 <- pslPeriodClim2
+      pslPeriodClimLoess3 <- pslPeriodClim3
+
+      for(i in 1:n.pos.lat){
+        for(j in 1:n.pos.lon){
+          my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes)
+          my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35)
+          pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1)
+          rm(my.data1, my.loess1)
+          gc()
+
+          my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes)
+          my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35)
+          pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2)
+          rm(my.data2, my.loess2)
+          gc()
+
+          my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes)
+          my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35)
+          pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3)
+          rm(my.data3, my.loess3)
+          gc()
+        }
+      }
+
+      rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)
+      gc()
+
+      pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members)
+
+      pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+      rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClimLoess <- pslPeriodClim
+      for(i in 1:n.pos.lat){
+        for(j in 1:n.pos.lon){
+          my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+          my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+          pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+        }
+      }
+
+      rm(pslPeriodClim)
+      gc()
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    } # close if on running cluster
+
+  } else { # in case of no LOESS:
+
+    if(running.cluster == TRUE) {
+      # in this case, the climatology is measured FOR EACH MONTH INDEPENDENTLY, instead of using a seasonal value:
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3))
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) # climatology of the loaded start month
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+    }
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClimDos
+  rm(pslPeriodClimDos)
+  gc()
+
+} # close if on fields.name == forecast.name
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  psleuFull <- array(NA,
c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R") + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + + #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + if(running.cluster) { + if(subdaily){ + my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4)) + if(p == 1) my.hours <- c(1337:1460,1:236) + if(p == 12) my.hours <- c(1217:1460,1:124) + + pslPeriod <- psleuFull[,my.hours,,] # select all days in the period of 3 months centered on the target month p + + } else { + pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all days in the period of 3 months centered on the target month p + } + + } else { + + if(subdaily){ + my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4)) + pslPeriod <- psleuFull[,my.hours,,] + } else { + pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only days in the chosen period (i.e: winter) + } + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + if(!running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month(2001,p))), 1, n.years) + if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + + pslPeriod <- pslPeriod * lat.weighted.array + + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Preformatting data for clustering. Please wait......\n") + # this function eats much memory!!! 
+  psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+  #gc()
+
+
+  # select period data from psleuFull to use it as input of the cluster analysis:
+  #psl.kmeans <- pslPeriod
+  #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+  #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in case of S4 we don't need to select anything because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+    if(running.cluster == TRUE && p < 13) {
+      # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-month running period,
+      # generalizing the command below (which at present only works for the month of January and lead time 0):
+
+      # Select DJF (lead time 0) for our case study, excluding the 29th of February:
+      pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE]
+
+    } else {
+
+      # select data only for the startmonth and leadmonth to study:
+      cat("Extracting data. Please wait...\n")
+      chosen.month <- start.month + lead.month # find the month we want to select data for
+      if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+      # remove the 29th of February to have the same n. of elements for all years:
+      i=0
+      for(y in years){
+        i=i+1
+        leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+        leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+        #leadtime.max <- 61
+
+        if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1
+        int.leadtimes <- leadtime.min:leadtime.max
+        num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+        # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+
+        if(y == years[1]) {
+          pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+        } else {
+          pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+        }
+      }
+
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!!
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+
+    #save(psl.melted,file=paste0(workdir,"/psl_melted.RData"))
+
+    cat("Preformatting data. Please wait......\n")
+    # This function eats much memory!!!
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon)) + #gc() + + #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData")) + #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + #save(my.cluster,file=paste0(workdir,"/my_cluster.RData")) + + + #psl.kmeans <- array(NA,c(dim(pslPeriod))) + #n.rows <- nrow(psl.melted) + #a <- psl.melted$Member + #b <- psl.melted$StartYear + #c <- psl.melted$LeadDay + #d <- psl.melted$Lat + #e <- psl.melted$Lon + #f <- psl.melted$value + + # this function to convert a data.frame in an array is much less memory-eater than acast but a bit slower: + #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i] + gc() + #rm(pslPeriod); gc() + } + + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no bisestile day left!) + + days.period <- list() # get which days inside psleuFull belong to the chosen period + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years)) + } + + # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2]) + # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA + } + + + # compute the clustering: + cat("Clustering data. Please wait...\n") + if(PCA){ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 point of 9 + pslcut <- psl.kmeans[,my.seq] + + my.PCA <- princomp(pslcut,cor=FALSE) + tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. variance + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the pcs that explains at least 80% of variance + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters + rm(pslcut) + } else { # 4 is the number of clusters: + + my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE) + + gc() + + #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #if(fields.name == rean.name){ + #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE) + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData")) + #} + + if(fields.name == ECMWF_S4.name) { + my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans + + # save the time series output of the cluster analysis: + #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData")) + + #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2) + } + } + + rm(psl.kmeans) + gc() + +#} # close for on 'p' + 
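+# Toy illustration of the PCA + k-means path above, runnable on its own
+# (a sketch only: 'demo' is random noise standing in for the days x gridpoints
+# matrix psl.kmeans, and 0.8 stands in for variance.explained):
+#set.seed(1)
+#demo <- matrix(rnorm(300*50), 300, 50)        # 300 "days" x 50 "grid points"
+#demo.pca <- princomp(demo, cor=FALSE)
+#expl.var <- cumsum(demo.pca$sdev^2/sum(demo.pca$sdev^2)) # cumulative explained variance
+#n.pcs.demo <- which(expl.var > 0.8)[1]        # smallest number of PCs explaining >80%
+#demo.cluster <- kmeans(demo.pca$scores[,1:n.pcs.demo], centers=4, iter.max=100, nstart=30)
+#table(demo.cluster$cluster)                   # number of "days" assigned to each regime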
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if(running.cluster == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+      n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1]) -1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      # in this case, we are not selecting days but 6-hourly intervals:
+      if(subdaily == TRUE) days.month.full <- sort(c(days.month.full*4, days.month.full*4+1, days.month.full*4+2, days.month.full*4+3))
+
+      # select only the days of the cluster series inside the target month p:
+      cluster.sequence <- my.cluster$cluster[days.month.full]
+
+    } else {
+
+      cluster.sequence <- my.cluster$cluster
+    }
+
+    # convert the cluster sequence from 6-hourly to daily:
+    if(subdaily){
+      type <- c()
+
+      for(day in 1:(length(cluster.sequence)/4)){
+        #day <- ceiling(hour/4)
+        hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)]
+
+        t1 <- length(which(hourly == 1))
+        t2 <- length(which(hourly == 2))
+        t3 <- length(which(hourly == 3))
+        t4 <- length(which(hourly == 4))
+        tt <- c(t1,t2,t3,t4)
+
+        # if all the 4 time steps belong to the same regime, assign it to this day:
+        if(length(unique(hourly)) == 1) type[day] <- hourly[1]
+
+        # if there are two different regimes, check if one has a higher frequency:
+        if(length(unique(hourly)) == 2){
+          if(any(tt == 3)){ # if 3 of the 4 time intervals belong to the same weather regime, assign this day to it
+            type[day] <- which(tt == 3)
+          } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrarily assign the regime occurring at 12:00 of that day
+            type[day] <- hourly[3]
+          }
+        }
+
+        # if there are three different regimes, assign the day to the only regime with 2 time steps in that day:
+        if(length(unique(hourly)) == 3) type[day] <- which(tt == 2)
+
+        # if there are four different regimes (a very rare event!), assign the day to the regime occurring at 12:00 of that day:
+        if(length(unique(hourly)) == 4) type[day] <- hourly[3]
+
+      } # close for on day
+
+    } # close if on subdaily
+
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 days and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+
+    # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+
+    mod.subdaily <- ifelse(subdaily,4,1)
+    np <- n.days.in.a.period(p,1)*mod.subdaily
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+
+    # measure regime anomalies:
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster == TRUE){
+      pslmat.new <- pslmat
+      pslmat <- pslmat.new[days.month.full,,]
+      gc()
+    }
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,
rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,wr1,wr2,wr3,wr4,file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + # immediatly save the plots of the ERA-Interim monthly regime anomalies with the running cluster instead than loading them in the next script: + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + map1 <- pslwr1mean + map2 <- pslwr2mean + map3 <- pslwr3mean + map4 <- pslwr4mean + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. 
Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array == 1, 2, sum)
+    wr2y <- apply(my.cluster.array == 2, 2, sum)
+    wr3y <- apply(my.cluster.array == 3, 2, sum)
+    wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+    # convert to frequencies in %:
+    wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days in one month of the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but to measure the maximum frequency among all the members:
+    wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+    # in this case, we must use psl.melted instead of psleuFull. Both are already in anomalies:
+    # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #       pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, saved for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, cluster.sequence, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, wr1, wr2, wr3, wr4, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name) {
+    if(fields.name == ECMWF_monthly.name){
+      my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+      #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+      days.period <- period.length <- list()
+      days.period[[p]] <- integer(0) # seed the list element so it can be appended to below
+      for(startdate in my.startdates.period){
+        days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+      }
+      period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+    }
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+if(monthly_anomalies) {
+  # Compute the climatology and anomalies (with the LOESS filter) for the sfcWind data and draw the monthly anomalies, if you want to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis:
+
+  sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  sfcWind <- sfcWind366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # take the March to December period, removing the 29th of February
+  }
+
+  rm(sfcWind366)
+  gc()
+
+  sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+  sfcWindClimLoess <- sfcWindClimDaily
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      sfcWindClimLoess[,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(sfcWindClimDaily)
+  gc()
+
+  sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years)
+
+  sfcWindAnom <- sfcWind - sfcWindClim2
+
+  rm(sfcWindClimLoess)
+  gc()
+
+  # same as above, but computing the climatology and anomalies (with the LOESS filter) for the psl data, to draw the monthly slp anomalies:
+  slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  slp <- slp366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the March to December period, removing the 29th of February
+  }
+
+  rm(slp366)
+  gc()
+
+  slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T)
+
+  slpClimLoess <- slpClimDaily
+  for(i in 1:n.pos.lat){
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      slpClimLoess[,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(slpClimDaily)
+  gc()
+
+  slpClim2 <- InsertDim(slpClimLoess, 1, n.years)
+  slpClim2 <- InsertDim(slpClimLoess, 1, n.years)
+
+  slpAnom <- slp - slpClim2
+
+  rm(slpClimLoess)
+  gc()
+
+  if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa
+
+  EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+  my.brks.var <- seq(-3,3,0.5)
+  my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+  # same but for slp:
+  my.brks.var2 <- c(seq(-21,-1,2),0,seq(1,21,2))
+  my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--white--red colors
+  my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white"
+
+  # save all the monthly anomaly maps:
+  for(year.test in year.chosen){
+    for(month.test in month.chosen){
+      #year.test <- 2016; month.test <- 10 # for the debug
+
+      pos.year.test <- year.test - year.start + 1
+
+      # wind anomaly for the chosen year and month:
+      sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,]
+
+      sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+      # psl anomaly for the chosen year and month:
+      slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,]
+
+      slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE)
+
+      fileoutput <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly.png")
+
+      png(filename=fileoutput,width=2000,height=1000)
+
+      par(fig=c(0, 0.5, 0.08, 0.98), new=TRUE)
+      PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5)
+
+      #my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.03, 0.5, 0.015, 0.09), new=TRUE)
+      ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+
+      par(fig=c(0.47, 0.5, 0, 0.028), new=TRUE)
+      mtext("m/s", cex=1.8)
+
+      par(fig=c(0.5, 1, 0.1, 0.98), new=TRUE)
+
+      PlotEquiMap2(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean[,EU]), contours.lty="F1FF1F", cex.lab=1)
+
+      # you could almost use the normal PlotEquiMap function below; it only lacks the black line for anomaly=0 and smaller contour labels:
+      # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5)
+
+      ##my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.5, 1, 0, 0.09), new=TRUE)
+      #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+      ColorBar(my.brks.var2, cols=my.cols.var2, vert=FALSE, triangle_ends=c(FALSE,FALSE), label_scale=1.8, subsampleg=2) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2)
+
+      par(fig=c(0.97, 1, 0, 0.027), new=TRUE)
+      mtext("hPa", cex=1.8)
+
+      par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(workdir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(p,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load regime names: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) 
+ + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + fileoutput.test <- paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1000,height=2000) + + par(fig=c(0, 0.5, 0.77, 0.97), new=TRUE) + PlotEquiMap2(rescale(imp.test1[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.54, 0.74), new=TRUE) + PlotEquiMap2(rescale(imp.test2[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.31, 0.51), new=TRUE) + PlotEquiMap2(rescale(imp.test3[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.5, 0.08, 0.28), new=TRUE) + PlotEquiMap2(rescale(imp.test4[,EU], my.brks.var[1], tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + par(fig=c(0,0.5,0.955,0.975), new=TRUE) + mtext(paste0(orden[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.725,0.745), new=TRUE) + mtext(paste0(orden[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.495,0.515), new=TRUE) + mtext(paste0(orden[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.5,0.265,0.285), new=TRUE) + mtext(paste0(orden[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.5, 0.015, 0.06), new=TRUE) + ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + + par(fig=c(0.48, 0.5, 0.01, 0.044), new=TRUE) + mtext("m/s", cex=1.6) + + # right figures: + par(fig=c(0.5, 1, 0.77, 0.97), new=TRUE) + PlotEquiMap2(rescale(psl.test1[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.54, 0.74), new=TRUE) + PlotEquiMap2(rescale(psl.test2[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.31, 0.51), new=TRUE) + PlotEquiMap2(rescale(psl.test3[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3[,EU]), contours.lty="F1FF1F", cex.lab=1) + par(fig=c(0.5, 1, 0.08, 0.28), new=TRUE) + PlotEquiMap2(rescale(psl.test4[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4[,EU]), contours.lty="F1FF1F", cex.lab=1) + + par(fig=c(0.5,1,0.955,0.975), new=TRUE) + 
mtext(paste0(orden[1], " sea level pressure anomaly "), font=2, cex=2)
+      par(fig=c(0.5,1,0.725,0.745), new=TRUE)
+      mtext(paste0(orden[2], " sea level pressure anomaly "), font=2, cex=2)
+      par(fig=c(0.5,1,0.495,0.515), new=TRUE)
+      mtext(paste0(orden[3], " sea level pressure anomaly "), font=2, cex=2)
+      par(fig=c(0.5,1,0.265,0.285), new=TRUE)
+      mtext(paste0(orden[4], " sea level pressure anomaly "), font=2, cex=2)
+
+      par(fig=c(0.5, 0.99, 0.015, 0.06), new=TRUE)
+      #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+      ColorBar(my.brks.var2, cols=my.cols.var2, vert=FALSE, triangle_ends=c(FALSE,FALSE), label_scale=1.8, subsampleg=2) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2)
+
+      par(fig=c(0.971, 0.997, 0.01, 0.044), new=TRUE)
+      mtext("hPa", cex=1.6)
+
+      par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
+      mtext("0", cex=1.8)
+
+      dev.off()
+
+      # same image, formatted for the catalogue:
+      fileoutput2.test <- paste0(rean.dir,"/",fields.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly regime anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.test," ", fileoutput2.test))
+
+    } # close for on month.test
+  } # close for on year.test
+
+} # close if on monthly_anomalies
+
diff --git a/old/weather_regimes_v42.R b/old/weather_regimes_v42.R
new file mode 100644
index 0000000000000000000000000000000000000000..cc17847e12dc93a9e3c3088b1088ec2a2af2043a
--- /dev/null
+++ b/old/weather_regimes_v42.R
@@ -0,0 +1,1104 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: the input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
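+
+# A minimal sketch of the clustering step this script is built around (toy data, hypothetical
+# sizes; the real input is the days-by-gridpoints matrix of SLP anomalies built further below):
+#
+#   set.seed(1)
+#   toy.anom <- matrix(rnorm(100*20), nrow=100)                     # 100 days x 20 grid points
+#   toy.km <- kmeans(toy.anom, centers=4, iter.max=100, nstart=30)  # 4 weather regimes
+#   table(toy.km$cluster)   # number of days assigned to each regime
+#   toy.km$centers          # the 4 regime patterns (4 x 20)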
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP machine
+
+if(SMP){
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+  source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+  workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # load it from the terminal before running this script interactively on the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for the var data:
+ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- JRA55 #JRA55 #ERAint #NCEP # choose one of the three reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# put 'rean' to load the pressure fields and var from a reanalysis, or 'forecast' to load them from a forecast system:
+fields <- rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2017 #2016 #2013 #2015
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+running.cluster <- FALSE # add to the clustering also the daily SLP data of the two months closest to the target month (only for monthly analysis)
+running.15 <- TRUE # add to the clustering also the daily SLP data of the 15 days of the two closest months (only for monthly analysis). You cannot set both
+                   # this variable and 'running.cluster' above to TRUE
+lat.weighting <- TRUE # set it to TRUE to weight the psl data by latitude before applying the cluster analysis
+subdaily <- FALSE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available)
+
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days in the same regime.
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here, because the geopotential has only positive values of longitude!
+
+# Only for reanalysis:
+WR.period <- 1:12 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for seasonal forecasts:
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and NAs are dealt with
+res <- 0.75 # set the resolution to interpolate the seasonal psl and var data to when loading it (e.g. set it to 0.75 to use the same resolution as ERA-Interim);
+            # the interpolation method is bilinear.
+
+# Only for monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of obs.data;
+                      # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name)))
+
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
+# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month on which we perform the clustering;
+# in case the script is run with two arguments, it is assumed a forecast is used and the first argument is the startmonth and the second one the leadmonth.
+# in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+  start.month <- startM
+  #WR.period <- start.month
+  if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+  fields <- rean
+  fields.name <- rean.name
+  WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+  fields <- forecast
+  fields.name <- forecast.name
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script on the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar functions
+  workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, "  Period: ", WR.period, "  ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+#days.period <- n.days.period <- period.length <- list()
+#for (pp in 1:17){
+#  days.period[[pp]] <- NA
+#  for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+#  days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+#  # number of days belonging to that period from year.start to year.end:
+#  n.days.period[[pp]] <- length(days.period[[pp]])
+#  # number of days belonging to that period in a single year:
+#  period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+#}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
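+
+# Worked example of the grid string built above: with res = 0.75,
+# n.lon.grid = 360/0.75 = 480 and n.lat.grid = 180/0.75 + 1 = 241, so my.grid is "r480x241"
+# (the same grid string that appears in the commented Load() test below).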
+cat("Loading lat/lon. Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+# in the case of S4, we also interpolate it to the same resolution of the reanalysis
+# (notice that if the S4 grid is already the chosen grid (typically the ERA-Interim one), Load() leaves the S4 grid unchanged):
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that this only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (a subset of the whole spatial domain loaded)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
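+
+# Sketch of the general case for pos.lon (hypothetical, kept as a comment): the concatenation
+# above assumes the study area crosses the Greenwich meridian (lon.min > lon.max in 0-360
+# coordinates, as with the North Atlantic box of this script). For a contiguous area it would be:
+#
+#   if(lon.min < lon.max) { pos.lon <- pos.lon.min:pos.lon.max              # no Greenwich crossing
+#   } else { pos.lon <- c(1:pos.lon.max, pos.lon.min:length(domain$lon)) }  # crossing, as above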
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name && subdaily == FALSE){ # load the daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  # remove leap days so that all arrays have the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March-to-December period, skipping the 29th of February
+  }
+
+  rm(psleuFull366)
+  gc()
+
+  if(running.cluster == TRUE && rean.name == "ERA-Interim" && year.end == 2016) {
+    ## in case of October, which is the last month of the last year of data available, the November data for the 3rd month of the running cluster is not available.
+    ## Load() sets these values to NA,
+    ## but the clustering analysis is not able to process NAs, so we have to remove the NAs for that month and replace them with the values of the previous year,
+    ## just to be able to do the clustering; the values of the daily cluster series for November will never be selected once the clustering is finished.
+    ## Note that for JRA-55, January 2017 data is already available, so it is not necessary to remove NAs, even when doing the running cluster for December 2016!
+
+    ## the months after the last month of the last year loaded are filled with NA by Load(); the clustering algorithm doesn't accept NAs, so:
+    psleuFull[,,n.years,305:334,,] <- psleuFull[,,n.years-1,305:334,,]
+  }
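+
+  # Worked example of the leap-day removal above: days 1:59 (1 Jan-28 Feb) are already aligned,
+  # and copying source days 61:366 onto target days 60:365 skips day 60 (29 Feb) of a 366-day year:
+  #
+  #   365-day target: ... 58 59 | 60 61 ... 365   <-   366-day source: ... 58 59 | 61 62 ... 366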
+  # convert psl into daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    pslPeriodClimLoess <- array(NA, dim(pslPeriodClim))
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+  } else { # keep the raw daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClim2
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean.name and subdaily == FALSE
+
+
+# Load the 6-hourly psl data of all years in the reanalysis case:
+if(fields.name == rean.name && subdaily == TRUE){
+
+  #sdates <- as.vector(sapply(year.start:year.end, function(x) paste0(x, sprintf("%02d", 1:12), '01')))
+  #my.exp <- list(path=fields)
+  #psleuFull366 <- Load(var = psl, exp = list(my.exp), NULL, sdates, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+  #dim(data$obs) <- c(dim(data$obs)[1:2], 1, dim(data$obs)[3]*dim(data$obs)[4], dim(data$obs)[5:6])
+
+  my.exp <- list(path=fields)
+
+  # Load January data:
+  psleu1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  # Load February data (it automatically discards the 29th of February):
+  psleu2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  # Load March data:
+  psleu3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0301'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0401'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu5 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0501'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu6 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0601'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu7 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0701'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu8 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0801'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu9 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0901'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu10 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1001'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu11 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+  psleu12 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+  if(running.cluster == TRUE && rean.name == "ERA-Interim") {
+    # in this case, the last month of the last year might not have in /esnas the data for the 3rd month of the running cluster. Load() sets these values to NA,
+    # but the clustering analysis is not able to process NAs, so we have to remove them for that month and replace them with the values of the previous year:
+    psleu11[,,n.years,,,] <- psleu11[,,n.years-1,,,]
+    psleu12[,,n.years,,,] <- psleu12[,,n.years-1,,,]
+  }
+
+  psleuFull <- abind(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12, along=4)
+  rm(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12)
+  gc()
+
+  # convert psl into daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  if(LOESS == TRUE){
+    # separate the psl data into the four hours of the day (0:00, 6:00, 12:00, 18:00):
+    pslPeriodClim1 <- pslPeriodClim[1,1,seq(1,1460,4),,]
+    pslPeriodClim2 <- pslPeriodClim[1,1,seq(2,1460,4),,]
+    pslPeriodClim3 <- pslPeriodClim[1,1,seq(3,1460,4),,]
+    pslPeriodClim4 <- pslPeriodClim[1,1,seq(4,1460,4),,]
+
+    rm(pslPeriodClim)
+    gc()
+
+    pslPeriodClimLoess1 <- array(NA, dim(pslPeriodClim1))
+    pslPeriodClimLoess2 <- array(NA, dim(pslPeriodClim2))
+    pslPeriodClimLoess3 <- array(NA, dim(pslPeriodClim3))
+    pslPeriodClimLoess4 <- array(NA, dim(pslPeriodClim4))
+
+    for(i in 1:n.pos.lat){
+      for(j in 1:n.pos.lon){
+        my.data1 <- data.frame(ens.mean=pslPeriodClim1[,i,j], hourly=1:(1460/4))
+        my.loess1 <- loess(ens.mean ~ hourly, my.data1, span=0.35)
+        pslPeriodClimLoess1[,i,j] <- predict(my.loess1)
+
+        my.data2 <- data.frame(ens.mean=pslPeriodClim2[,i,j], hourly=1:(1460/4))
+        my.loess2 <- loess(ens.mean ~ hourly, my.data2, span=0.35)
+        pslPeriodClimLoess2[,i,j] <- predict(my.loess2)
+
+        my.data3 <- data.frame(ens.mean=pslPeriodClim3[,i,j], hourly=1:(1460/4))
+        my.loess3 <- loess(ens.mean ~ hourly, my.data3, span=0.35)
+        pslPeriodClimLoess3[,i,j] <- predict(my.loess3)
+
+        my.data4 <- data.frame(ens.mean=pslPeriodClim4[,i,j], hourly=1:(1460/4))
+        my.loess4 <- loess(ens.mean ~ hourly, my.data4, span=0.35)
+        pslPeriodClimLoess4[,i,j] <- predict(my.loess4)
+      }
+    }
+
+    rm(my.data1, my.data2, my.data3, my.data4, my.loess1, my.loess2, my.loess3, my.loess4)
+    rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClim4)
+    gc()
+
+    s1 <- seq(1,1460,4)
+    s2 <- seq(2,1460,4)
+    s3 <- seq(3,1460,4)
+    s4 <- seq(4,1460,4)
+
+    pslPeriodClimLoess <- array(NA,c(365*4,dim(pslPeriodClimLoess1)[2:3]))
+
+    # interleave the four smoothed series back into a single 6-hourly climatology:
+    for(day in 1:365){
+      pslPeriodClimLoess[s1[day],,] <- pslPeriodClimLoess1[day,,]
+      pslPeriodClimLoess[s2[day],,] <- pslPeriodClimLoess2[day,,]
+      pslPeriodClimLoess[s3[day],,] <- pslPeriodClimLoess3[day,,]
+      pslPeriodClimLoess[s4[day],,] <- pslPeriodClimLoess4[day,,]
+    }
+    pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 1, n.years)
+
+  } else { # keep the raw daily climatology to compute the anomalies:
+    pslPeriodClim2 <- InsertDim(pslPeriodClim[1,1,,,], 1, n.years) # drop the singleton dimensions so the subtraction below matches
+
+    rm(pslPeriodClim)
+    gc()
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull[1,1,,,,] - pslPeriodClim2
+
+  rm(pslPeriodClim2)
+  gc()
+
+} # close if on fields.name == rean.name and subdaily == TRUE
+
+
+if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at a time and all the lead times (since each month needs ~3 GB of memory)
+  if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+  fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+  fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2)
+
+  if(missing.forecasts == TRUE){
+    years <- c()
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+      # beware: this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine:
+      #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+      num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # doesn't work well on the SMP machine
+      num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+      num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+      num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+
+      #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y)
+      if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+    }
+
+  } else {
+    years <- year.start:year.end
+  }
+
+  n.years.full <- length(years)
+
+  if(running.cluster == TRUE) {
+
+    start.month1 <- ifelse(start.month == 1, 12, start.month-1)
+    if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1}
+
+    start.month2.char <- start.month.char
+
+    start.month3 <- ifelse(start.month == 12, 1, start.month+1)
+    if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3}
+
+    # load three months of data, inserting all of them in the third dimension of the array, one month at a time:
+    psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+    gc()
+
+    psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+    gc()
+
+    psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod
+    gc()
+
+  } else {
+
+    psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
+
+  }
+
+  if(LOESS == TRUE){
+    # convert psl into daily anomalies with the LOESS filter:
+    cat("Calculating anomalies. Please wait...\n")
+
+    if(running.cluster == TRUE) {
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClimLoess1 <- pslPeriodClim1
+      pslPeriodClimLoess2 <- pslPeriodClim2
+      pslPeriodClimLoess3 <- pslPeriodClim3
+
+      for(i in 1:n.pos.lat){
+        for(j in 1:n.pos.lon){
+          my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes)
+          my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35)
+          pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1)
+          rm(my.data1, my.loess1)
+          gc()
+
+          my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes)
+          my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35)
+          pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2)
+          rm(my.data2, my.loess2)
+          gc()
+
+          my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes)
+          my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35)
+          pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3)
+          rm(my.data3, my.loess3)
+          gc()
+        }
+      }
+
+      rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)
+      gc()
+
+      pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members)
+
+      pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+      rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClimLoess <- pslPeriodClim
+      for(i in 1:n.pos.lat){
+        for(j in 1:n.pos.lon){
+          my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+          my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+          pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+        }
+      }
+
+      rm(pslPeriodClim)
+      gc()
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    } # close if on running cluster
+
+  } else { # in case of no LOESS:
+
+    if(running.cluster == TRUE) {
+      # in this case, the climatology is computed FOR EACH MONTH INDEPENDENTLY, instead of using a single seasonal value:
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      # replicate each monthly climatology over years and members, then bind the three months in the same order as psleuFull below:
+      pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClim1, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClim2, 2, n.years.full), 2, n.members)
+      pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClim3, 2, n.years.full), 2, n.members)
+
+      pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+      rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) # the climatology was not computed yet in this branch
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+    }
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClimDos
+  rm(pslPeriodClimDos)
+  gc()
+
+} # close if on fields.name == forecast.name
+
+if(fields.name == ECMWF_monthly.name){
+  # Load the 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e. winter)
+    if(running.cluster) {
+      if(subdaily){
+        my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4))
+        if(p == 1) my.hours <- c(1337:1460,1:236)
+        if(p == 12) my.hours <- c(1217:1460,1:124)
+
+        pslPeriod <- psleuFull[,my.hours,,] # select all days in the period of 3 months centered on the target month p
+
+      } else {
+        pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all days in the period of 3 months centered on the target month p
+      }
+
+    } else {
+
+      if(subdaily){
+        my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4))
+        pslPeriod <- psleuFull[,my.hours,,]
+      } else {
+        pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only days in the chosen period (i.e. winter)
+      }
+    }
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      if(!running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.period(2001,p))), 1, n.years)
+      if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+
+      pslPeriod <- pslPeriod * lat.weighted.array
+
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    gc()
+    cat("Preformatting data for clustering. Please wait...\n")
+    psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+
+    cat("Preformatting data for clustering. Please wait......\n")
+    # this function eats a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon))
+    #gc()
+
+    # select period data from psleuFull to use as input of the cluster analysis:
+    #psl.kmeans <- pslPeriod
+    #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert the array into a matrix!
+    #rm(pslPeriod, pslPeriod.weighted)
+  }
+
+  # in case of S4 we don't need to select anything, because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+    if(running.cluster == TRUE && p < 13) {
+      # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting the 3-month running period automatically,
+      # generalizing the command below (which at present only works for the month of January and lead time 0):
+
+      # select DJF (lead time 0) for our case study, excluding the 29th of February:
+      pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE]
+
+    } else {
+
+      # select data only for the startmonth and leadmonth to study:
+      cat("Extracting data. Please wait...\n")
+      chosen.month <- start.month + lead.month # find which month we want to select data for
+      if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+      # remove the 29th of February to have the same number of elements for all years:
+      i=0
+      for(y in years){
+        i=i+1
+        leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+        leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+        #leadtime.max <- 61
+
+        if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1
+        int.leadtimes <- leadtime.min:leadtime.max
+        num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+        # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+
+        if(y == years[1]) {
+          pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+        } else {
+          pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+        }
+      }
+
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!!
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+
+    #save(psl.melted,file=paste0(workdir,"/psl_melted.RData"))
+
+    cat("Preformatting data. Please wait......\n")
+    # This function eats a lot of memory!!!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
+    #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData"))
+    #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+    #save(my.cluster,file=paste0(workdir,"/my_cluster.RData"))
+
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this way of converting a data.frame into an array needs much less memory than acast, but is a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109), because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # subsample the grid, keeping only 1 point every 16
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA <- princomp(pslcut,cor=FALSE)
+    tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep based on the sum of their explained
variance
+    n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep only the PCs that together explain at least 80% of the variance
+    my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+    gc()
+
+    #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+    #if(fields.name == rean.name){
+    #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+    #}
+
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
+
+#} # close for on 'p'
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure the regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if(running.cluster == TRUE && p < 13){ # select only the days of the 3-month cluster series inside the target month p
+      n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1]) - 1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      # in this case, we are not selecting days but 6-hourly intervals:
+      if(subdaily == TRUE) days.month.full <- sort(c(days.month.full*4, days.month.full*4+1, days.month.full*4+2, days.month.full*4+3))
+
+      # select only the days of the cluster series inside the target month p:
+      cluster.sequence <- my.cluster$cluster[days.month.full]
+
+    } else {
+
+      cluster.sequence <- my.cluster$cluster
+    }
+
+    # convert the cluster sequence from 6-hourly to daily:
+    if(subdaily){
+      type <- c()
+
+      for(day in 1:(length(cluster.sequence)/4)){
+        #day <- ceiling(hour/4)
+        hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)]
+
+        t1 <- length(which(hourly == 1))
+        t2 <- length(which(hourly == 2))
+        t3 <- length(which(hourly == 3))
+        t4 <- length(which(hourly == 4))
+        tt <- c(t1,t2,t3,t4)
+
+        # if all 4 time steps belong to the same regime, assign the day to it:
+        if(length(unique(hourly)) == 1) type[day] <- hourly[1]
+
+        # if there are two different regimes, check if one has a higher frequency:
+        if(length(unique(hourly)) == 2){
+          if(any(tt == 3)){ # if 3 of the 4 time intervals belong to the same weather regime, assign this day to it
+            type[day] <- which(tt == 3)
+          } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrarily assign the regime occurring at 12:00 of that day
+            type[day] <- hourly[3]
+          }
+        }
+
+        # if there are three different regimes, assign the day to the only possible regime with 2 time steps:
+        if(length(unique(hourly)) == 3) type[day] <- which(tt == 2)
+
+        # if there are four different regimes (a very rare event!), assign the day to the regime occurring at 12:00:
+        if(length(unique(hourly)) == 4) type[day] <- hourly[3]
+
+      } # close for on day
+
+    } # close if on subdaily
+
+    if(sequences){ # it works only for rean data!!!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days.
+      # warning: the first 4 and last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE):
+      wrdiff <- diff(my.cluster$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
+
+    # replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+
+    mod.subdaily <- ifelse(subdaily,4,1)
+    np <- n.days.in.a.period(p,2001)*mod.subdaily
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+ wr4y <- wr4y/np
+
+ gc()
+
+
+ # measure regime anomalies:
+ pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+ if(running.cluster == TRUE){
+ pslmat.new <- pslmat
+ pslmat <- pslmat.new[days.month.full,,]
+ gc()
+ }
+
+ pslwr1 <- pslmat[wr1,,, drop=F]
+ pslwr2 <- pslmat[wr2,,, drop=F]
+ pslwr3 <- pslmat[wr3,,, drop=F]
+ pslwr4 <- pslmat[wr4,,, drop=F]
+
+ pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+ pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+ pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+ pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+ # spatial mean for each day, saved for the measure of the FairRPSS:
+ pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+ pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+ pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+ pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+
+ # save all the data necessary to redraw the graphs when we know the right regime:
+ save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1, wr2, wr3, wr4, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+ # immediately save the plots of the ERA-Interim monthly regime anomalies with the running cluster, instead of loading them in the next script:
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ map1 <- pslwr1mean
+ map2 <- pslwr2mean
+ map3 <- pslwr3mean
+ map4 <- pslwr4mean
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ dev.off()
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ dev.off()
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ dev.off()
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ dev.off()
+
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ gc()
+
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+ #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+ cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ wr1y <- apply(my.cluster.array == 1, 2, sum)
+ wr2y <- apply(my.cluster.array == 2, 2, sum)
+ wr3y <- apply(my.cluster.array == 3, 2, sum)
+ wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+ # convert to frequencies in %:
+ wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days of one month of the cluster time series
+ wr2y <- wr2y/(num.leadtimes*n.members)
+ wr3y <- wr3y/(num.leadtimes*n.members)
+ wr4y <- wr4y/(num.leadtimes*n.members)
+
+ # same as above, but to measure the maximum frequency across all the members:
+ wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+ wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+ wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+ wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+ wr1yMax <- wr1yMax/num.leadtimes
+ wr2yMax <- wr2yMax/num.leadtimes
+ wr3yMax <- wr3yMax/num.leadtimes
+ wr4yMax <- wr4yMax/num.leadtimes
+
+ # and the minimum frequency across all the members:
+ wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+ wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+ wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+ wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+ wr1yMin <- wr1yMin/num.leadtimes
+ wr2yMin <- wr2yMin/num.leadtimes
+ wr3yMin <- wr3yMin/num.leadtimes
+ wr4yMin <- wr4yMin/num.leadtimes
+
+ cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+ # in this case, we must use psl.melted instead of psleuFull; both already contain anomalies
+ # (psl.melted depends on the startdate and leadtime, psleuFull does not)
+ gc()
+ pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+ #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+ gc()
+
+ #for(a in 1:2){
+ # for(b in 1:3){
+ # for(c in 1:4){
+ # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+ # }
+ # }
+ #}
+
+ pslwr1 <- pslmat[wr1,,, drop=F]
+ pslwr2 <- pslmat[wr2,,, drop=F]
+ pslwr3 <- pslmat[wr3,,, drop=F]
+ pslwr4 <- pslmat[wr4,,, drop=F]
+
+ pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+ pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+ pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+ pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+ # spatial mean for each day, saved for the measure of the FairRPSS:
+ pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+ pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+ pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+ pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+ rm(pslmat)
+ gc()
+
+ # save all the data necessary to draw the graphs (but not the impact maps):
+ save(my.cluster, cluster.sequence, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, wr1, wr2, wr3, wr4, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ gc()
+
+ }
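+
+ # Illustration (toy values, not part of the pipeline) of the interleaved, day-major time order
+ # that the seq() indexing of the monthly system below relies on:
+ if(FALSE){
+ n.years.toy <- 3 # toy setup: 3 years and 4 startdates
+ cs.toy <- c(1,2,1, 3,3,1, 2,1,4, 1,1,1) # order: year1day1, year2day1, year3day1, year1day2, ...
+ cs.toy[seq(2, length(cs.toy), n.years.toy)] # days of the 2nd year -> 2 3 1 1
+ }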
+ # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+ #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period <- period.length <- list()
+ days.period[[p]] <- integer(0) # initialize element p, so that it can be grown inside the loop below
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+ # yearly frequency of each regime (looping over the years, as in the reanalysis case above):
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+ }
+
+ cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v42.R~ b/old/weather_regimes_v42.R~
new file mode 100644
index 0000000000000000000000000000000000000000..c6bb46205a8072ca21d31057a5d2e4f97452aada
--- /dev/null
+++ b/old/weather_regimes_v42.R~
@@ -0,0 +1,1099 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system
+#
+# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+#
+# Assumption: its outputs are needed by the scripts weather_regimes_impact.R and weather_regimes.R
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(abind)
+library(reshape2) # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
+
+SMP <- FALSE # if TRUE, the script is assumed to run on the SMP machine
+
+if(SMP){
+ source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')
+ workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated
+} else {
+ source('/home/Earth/ncortesi/scripts/Rfunctions.R')
+ workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated
+}
+
+# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data:
+ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!!!!!
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- JRA55 #JRA55 #ERAint #NCEP # choose one of the reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+# choose a forecast system:
+forecast <- ECMWF_S4
+
+# put 'rean' to load pressure fields and var from a reanalysis, or 'forecast' to load them from a forecast system:
+fields <- rean #forecast
+
+psl <- "psl" #"g500" # pressure variable to use from the chosen reanalysis
+psl.name <- "SLP" # "Z500" # the name to show in the maps
+
+year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981
+year.end <- 2016 #2016 #2013 #2015
+
+LOESS <- TRUE # whether to apply the LOESS filter to the climatology or not
+running.cluster <- TRUE # also add to the clustering the daily SLP data of the two months adjacent to the target month (only for monthly analysis)
+lat.weighting <- TRUE # set it to TRUE to weight psl data by latitude before applying the cluster analysis
+subdaily <- FALSE # if TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalyses with 6-hourly data available)
+
+missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcast files in the forecast psl data, so that only the available years are loaded and NAs are dealt with
+sequences <- FALSE # set it to TRUE if you want to select only days belonging to sequences of at least 5 consecutive days assigned to the same regime.
+PCA <- FALSE # set it to TRUE if you want to apply a PCA before the k-means cluster analysis, FALSE otherwise
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81 #81 #80 #70
+lat.min <- 27 #30 #20 #30
+lon.max <- 45 #36 #30 #40
+lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive values of longitude!
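+
+# Note (illustrative, not part of the pipeline): in the 0-360 longitude convention used here,
+# 274.5 corresponds to 274.5 - 360 = -85.5, i.e. 85.5W, so the box spans the North Atlantic
+# from 85.5W eastwards across the Greenwich meridian up to 45E. A one-line conversion:
+if(FALSE){
+ lon.360 <- 274.5 # toy value, not used by the script
+ ifelse(lon.360 > 180, lon.360 - 360, lon.360) # -> -85.5
+}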
+
+# Only for reanalysis:
+WR.period <- 1:12 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script]
+
+# Only for Seasonal forecasts:
+startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script]
+leadM <- 0 # select only the data of one lead month [bypassed by the optional arguments of the script]
+n.members <- 15 # number of members of the forecast system to load
+n.leadtimes <- 216 # number of leadtimes of the forecast system to load
+
+res <- 0.75 # resolution to which the seasonal psl and var data is interpolated when loading it (e.g. set it to 0.75 to use the same resolution as ERA-Interim)
+ # the interpolation method is BILINEAR.
+
+# Only for Monthly forecasts:
+#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) for which you want to compute the weather regimes.
+#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week into the past, to fit the exact season of the obs. data;
+ # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks into the past, and so on.
+#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end)
+#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year
+#day=2 # if fields=forecast, set the day of the first startdate of the forecast year
+
+#############################################################################################################################
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name)))
+
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+n.years <- year.end - year.start + 1
+
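+# Usage sketch (illustrative; the script file name is assumed, adapt it to your copy):
+#   Rscript weather_regimes_v42.R 1       # reanalysis: cluster only month 1 (January)
+#   Rscript weather_regimes_v42.R 1 2     # forecast: start month 1, lead month 2
+#   Rscript weather_regimes_v42.R 1 2 1   # as above, but with the SMP machine paths (the third argument only switches the paths)
+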
+# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month on which to perform the clustering;
+# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth;
+# in case the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) == 0){
+ start.month <- startM
+ #WR.period <- start.month
+ if(fields.name == forecast.name) lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis:
+if(length(script.arg) == 1){
+ fields <- rean
+ fields.name <- rean.name
+ WR.period <- script.arg[1]
+}
+
+if(length(script.arg) >= 2){
+ fields <- forecast
+ fields.name <- forecast.name
+ start.month <- script.arg[1]
+ lead.month <- script.arg[2]
+ WR.period <- start.month
+}
+
+if(length(script.arg) == 3){ # in this case, we are running the script on the SMP machine and we need to override the variables below:
+ source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar functions
+ workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max")
+
+#days.period <- n.days.period <- period.length <- list()
+#for (pp in 1:17){
+# days.period[[pp]] <- NA
+# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+# # number of days belonging to that period from year.start to year.end:
+# n.days.period[[pp]] <- length(days.period[[pp]])
+# # number of days belonging to that period in a single year:
+# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduces a small error in the winter season length because of leap years
+#}
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+#domain<-list()
+#domain$lon <- seq(0,359.999999999,res)
+#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res))
+
+cat("Loading lat/lon. Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), '19990101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded with the data)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
+
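+# Illustration (toy grid, not part of the pipeline): how the pos.lon selection above picks a box
+# crossing Greenwich, keeping the leading columns up to lon.max and the trailing ones from lon.min:
+if(FALSE){
+ lon.toy <- c(0, 60, 120, 180, 240, 300) # toy 0-360 longitude axis
+ pos.lon.max.toy <- tail(which(45 - lon.toy >= 0), 1) # -> 1
+ pos.lon.min.toy <- which(lon.toy - 274.5 >= 0)[1] # -> 6
+ c(1:pos.lon.max.toy, pos.lon.min.toy:length(lon.toy)) # -> 1 6
+}
+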
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name && subdaily == FALSE){ # Load daily psl data of all years in the reanalysis case:
+ psleuFull <- array(NA,c(1,1,n.years,365, n.pos.lat, n.pos.lon))
+
+ psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+ # remove leap days to have all arrays with the same dimensions:
+ cat("Removing leap days. Please wait...\n")
+ psleuFull[,,,,,] <- psleuFull366[,,,1:365,,]
+
+ for(y in year.start:year.end){
+ y2 <- y - year.start + 1
+ if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366[,,y2,61:366,,] # take the March to December period, removing the 29th of February
+ }
+
+ rm(psleuFull366)
+ gc()
+
+ if(running.cluster == TRUE && rean.name == "ERA-Interim" && year.end == 2016) {
+ # in case of October, which is the last month of the last year of data available, the November data for the 3rd month of the running cluster is not available.
+ # Load() sets these values to NA, but the clustering analysis is not able to process NAs, so we have to remove the NAs of that month and
+ # replace them with the values of the previous year (hard-coded indices: with year.start=1981, year 36 is 2016 and year 35 is 2015):
+ psleuFull[,,36,305:334,,] <- psleuFull[,,35,305:334,,]
+ }
+
+ # convert psl to daily anomalies with the LOESS filter:
+ cat("Calculating anomalies. Please wait...\n")
+ pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+ if(LOESS == TRUE){
+ pslPeriodClimLoess <- array(NA, dim(pslPeriodClim))
+
+ for(i in 1:n.pos.lat){
+ for(j in 1:n.pos.lon){
+ my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(pslPeriodClim)
+ gc()
+
+ pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years)
+
+ } else { # keep the raw daily climatology to compute the anomalies:
+ pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years)
+
+ rm(pslPeriodClim)
+ gc()
+ }
+
+ ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+ ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+ ## s4.pred <- predict(s4.loess)
+
+ psleuFull <- psleuFull - pslPeriodClim2
+ rm(pslPeriodClim2)
+ gc()
+
+} # close if on fields.name == rean.name and subdaily == FALSE
+
+
+# Load 6-hourly psl data of all years in the reanalysis case:
+if(fields.name == rean.name && subdaily == TRUE){
+
+ #sdates <- as.vector(sapply(year.start:year.end, function(x) paste0(x, sprintf("%02d", 1:12), '01')))
+ #my.exp <- list(path=fields)
+ #psleuFull366 <- Load(var = psl, exp = list(my.exp), NULL, sdates, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs
+ #dim(data$obs) <- c(dim(data$obs)[1:2], 1, dim(data$obs)[3]*dim(data$obs)[4], dim(data$obs)[5:6])
+
+ my.exp <- list(path=fields)
+
+ # Load January data:
+ psleu1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+ # Load February data (it automatically discards the 29th of February):
+ psleu2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+ # Load March data:
+ psleu3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0301'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0401'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu5 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0501'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu6 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0601'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu7 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0701'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu8 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0801'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu9 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0901'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu10 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1001'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu11 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+ psleu12 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod
+
+ if(running.cluster == TRUE && rean.name == "ERA-Interim") {
+ # in this case, the last month of the last year might not have the data in /esnas for the 3rd month of the running cluster. Load() sets these values to NA,
+ # but the clustering analysis is not able to process NAs, so we have to remove the NAs of that month and replace them with the values of the previous year
+ # (hard-coded indices: with year.start=1981, year 36 is 2016 and year 35 is 2015):
+ psleu11[,,36,,,] <- psleu11[,,35,,,]
+ psleu12[,,36,,,] <- psleu12[,,35,,,]
+ }
+
+ psleuFull <- abind(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12, along=4)
+ rm(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12)
+ gc()
+
+ # convert psl to daily anomalies with the LOESS filter:
+ cat("Calculating anomalies.
Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + # separate psl data in the four hours of the day ( 0.00, 6.00, 12.00, 18.00) + pslPeriodClim1 <- pslPeriodClim[1,1,seq(1,1460,4),,] + pslPeriodClim2 <- pslPeriodClim[1,1,seq(2,1460,4),,] + pslPeriodClim3 <- pslPeriodClim[1,1,seq(3,1460,4),,] + pslPeriodClim4 <- pslPeriodClim[1,1,seq(4,1460,4),,] + + rm(pslPeriodClim) + gc() + + pslPeriodClimLoess1 <- array(NA, dim(pslPeriodClim1)) + pslPeriodClimLoess2 <- array(NA, dim(pslPeriodClim2)) + pslPeriodClimLoess3 <- array(NA, dim(pslPeriodClim3)) + pslPeriodClimLoess4 <- array(NA, dim(pslPeriodClim4)) + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[,i,j], hourly=1:(1460/4)) + my.loess1 <- loess(ens.mean ~ hourly, my.data1, span=0.35) + pslPeriodClimLoess1[,i,j] <- predict(my.loess1) + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[,i,j], hourly=1:(1460/4)) + my.loess2 <- loess(ens.mean ~ hourly, my.data2, span=0.35) + pslPeriodClimLoess2[,i,j] <- predict(my.loess2) + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[,i,j], hourly=1:(1460/4)) + my.loess3 <- loess(ens.mean ~ hourly, my.data3, span=0.35) + pslPeriodClimLoess3[,i,j] <- predict(my.loess3) + + my.data4 <- data.frame(ens.mean=pslPeriodClim4[,i,j], hourly=1:(1460/4)) + my.loess4 <- loess(ens.mean ~ hourly, my.data4, span=0.35) + pslPeriodClimLoess4[,i,j] <- predict(my.loess4) + + } + } + + rm(my.data1, my.data2, my.data3, my.data4, my.loess1, my.loess2, my.loess3, my.loess4) + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClim4) + gc() + + s1 <- seq(1,1460,4) + s2 <- seq(2,1460,4) + s3 <- seq(3,1460,4) + s4 <- seq(4,1460,4) + + pslPeriodClimLoess <- array(NA,c(365*4,dim(pslPeriodClimLoess1)[2:3])) + + for(day in 1:365){ + pslPeriodClimLoess[s1[day],,] <- pslPeriodClimLoess1[day,,] + pslPeriodClimLoess[s2[day],,] <- pslPeriodClimLoess2[day,,] + pslPeriodClimLoess[s3[day],,] <- pslPeriodClimLoess3[day,,] + pslPeriodClimLoess[s4[day],,] <- pslPeriodClimLoess4[day,,] + } + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 1, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull[1,1,,,,] - pslPeriodClim2 + + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean & subdaily == TRUE + + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at time and all the lead times (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- 
as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE) { + + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all them in the third dimension of the array, one month at time: + psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. 
Please wait...\n")
+
+ if(running.cluster == TRUE) {
+ pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+ pslPeriodClimLoess1 <- pslPeriodClim1
+ pslPeriodClimLoess2 <- pslPeriodClim2
+ pslPeriodClimLoess3 <- pslPeriodClim3
+
+ for(i in 1:n.pos.lat){
+ for(j in 1:n.pos.lon){
+ my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes)
+ my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35)
+ pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1)
+ rm(my.data1, my.loess1)
+ gc()
+
+ my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes)
+ my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35)
+ pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2)
+ rm(my.data2, my.loess2)
+ gc()
+
+ my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes)
+ my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35)
+ pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3)
+ rm(my.data3, my.loess3)
+ gc()
+ }
+ }
+
+ rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)
+ gc()
+
+ pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members)
+ pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members)
+ pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members)
+
+ pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3))
+ rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3)
+ gc()
+
+ psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+ rm(psleuFull1, psleuFull2, psleuFull3)
+ gc()
+
+ } else { # in case of no running cluster:
+
+ pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T)
+
+ pslPeriodClimLoess <- pslPeriodClim
+ for(i in 1:n.pos.lat){
+ for(j in 1:n.pos.lon){
+ my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes)
+ my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+ pslPeriodClimLoess[1,,i,j] <- predict(my.loess)
+ }
+ }
+
+ rm(pslPeriodClim)
+ gc()
+
+ pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+ } # close if on running cluster
+
+ } else { # in case of no LOESS:
+
+ if(running.cluster == TRUE) {
+ # in this case, the climatology is measured FOR EACH MONTH INDEPENDENTLY, instead of using a seasonal value:
+ pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+ pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+ pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3))
+
+ pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+ rm(pslPeriodClim)
+ gc()
+
+ psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+ rm(psleuFull1, psleuFull2, psleuFull3)
+ gc()
+
+ } else { # in case of no running cluster:
+
+ pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) # climatology of the ensemble mean
+ pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+ rm(pslPeriodClim)
+ gc()
+ }
+ }
+
+ ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+ ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+ ## s4.pred <- predict(s4.loess)
+
+ psleuFull <- psleuFull - pslPeriodClimDos
+ rm(pslPeriodClimDos)
+ gc()
+
+} # close if on fields.name == forecast.name
+
+if(fields.name == ECMWF_monthly.name){
+ # Load 6-hourly psl data in the forecast case:
+ n.leadtimes <- length(leadtime)
+ sdates <- weekly.seq(forecast.year,mes,day)
+ n.sdates <- length(sdates)
+
+ psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+ for (startdate in 1:n.sdates){
+ for(lday in leadtime){
+ var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+ mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+ rm(var)
+ gc()
+
+ psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+ rm(mean.lday)
+ gc()
+ }
+ }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pa to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+ # Select only the data of the month/season we want:
+ if(fields.name == rean.name){
+
+ #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (e.g. winter)
+ if(running.cluster) {
+ if(subdaily){
+ my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4))
+ if(p == 1) my.hours <- c(1337:1460,1:236)
+ if(p == 12) my.hours <- c(1217:1460,1:124)
+
+ pslPeriod <- psleuFull[,my.hours,,] # select all days in the period of 3 months centered on the target month p
+
+ } else {
+ pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all days in the period of 3 months centered on the target month p
+ }
+
+ } else {
+
+ if(subdaily){
+ my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4))
+ pslPeriod <- psleuFull[,my.hours,,]
+ } else {
+ pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only days in the chosen period (e.g. winter)
+ }
+ }
+
+ # weight the pressure fields by latitude (apply it to anomalies, not to absolute values!):
+ if(lat.weighting){
+ lat.weighted <- cos(lat*pi/180)^0.5
+ if(!running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.period(2001,p))), 1, n.years)
+ if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+
+ pslPeriod <- pslPeriod * lat.weighted.array
+
+ rm(lat.weighted.array)
+ gc()
+ }
+
+ gc()
+ cat("Preformatting data for clustering. Please wait...\n")
+ psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+ gc()
+
+ cat("Preformatting data for clustering. Please wait......\n")
+ # this function eats much memory!!!
+ psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + if(running.cluster == TRUE && p < 13) { + # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-months running period + # generalizing the command below (which at present only work for the month of January an lead time 0): + + # Select DJF (lead time 0) for our case study, excluding 29 of february): + pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE] + + } else { + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # remove the 29th of February to have the same n. of elements for all years + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) + # by construction it is equal to: n.days.in.a.month(chosen.month, y) + + if(y == years[1]) { + pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE] + } else { + pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3)) + } + } + + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!! + rm(lat.weighted.array) + gc() + } + + # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28 + gc() + cat("Preformatting data. Please wait...\n") + psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")) + gc() + + #save(psl.melted,file=paste0(workdir,"/psl_melted.RData")) + + cat("Preformatting data. Please wait......\n") + # This function eats much memory!!! 
+ psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+ #gc()
+
+ #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData"))
+ #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+ #save(my.cluster,file=paste0(workdir,"/my_cluster.RData"))
+
+
+ #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+ #n.rows <- nrow(psl.melted)
+ #a <- psl.melted$Member
+ #b <- psl.melted$StartYear
+ #c <- psl.melted$LeadDay
+ #d <- psl.melted$Lat
+ #e <- psl.melted$Lon
+ #f <- psl.melted$value
+
+ # this loop to convert a data.frame into an array is much less memory-hungry than acast, but a bit slower:
+ #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+ gc()
+ #rm(pslPeriod); gc()
+ }
+
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+ days.period <- list() # get which days inside psleuFull belong to the chosen period
+ days.period[[p]] <- integer(0) # initialize element p, so that it can be grown inside the loop below
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+ }
+
+ # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+ # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+ }
+
+
+ # compute the clustering:
+ cat("Clustering data. Please wait...\n")
+ if(PCA){
+ my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 grid point out of every 16
+ pslcut <- psl.kmeans[,my.seq]
+
+ my.PCA <- princomp(pslcut,cor=FALSE)
+ tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+ n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the PCs that together explain at least 80% of the variance
+ my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+ rm(pslcut)
+ } else { # 4 is the number of clusters:
+
+ my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+ gc()
+
+ #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+ #if(fields.name == rean.name){
+ #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+ # save the time series output of the cluster analysis:
+ #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+ #}
+
+ if(fields.name == ECMWF_S4.name) {
+ my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+ # save the time series output of the cluster analysis:
+ #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+ #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+ }
+ }
+
+ rm(psl.kmeans)
+ gc()
+
+#} # close for on 'p'
+
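+# Note (illustrative, not part of the pipeline): kmeans() starts from random centroids, so repeated
+# runs can relabel or slightly change the regimes; fixing the RNG seed right before the call above
+# makes the classification reproducible. A minimal sketch on toy data:
+if(FALSE){
+ set.seed(42) # any fixed seed
+ toy <- matrix(rnorm(200), ncol=2) # 100 toy "days" x 2 toy "grid points"
+ km <- kmeans(toy, centers=4, iter.max=100, nstart=30)
+ table(km$cluster) # size of each of the 4 clusters
+}
+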
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+ if(fields.name == rean.name){
+
+ if(running.cluster == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p
+ n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months
+
+ days.month <- days.month.new <- days.month.full <- c()
+ days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1]) -1 + 1:length(pos.month(2001,p))
+
+ for(y in 1:n.years){
+ days.month.new <- days.month + n.days.period*(y-1)
+ days.month.full <- c(days.month.full, days.month.new)
+ #print(days.month.new)
+ #print(days.month)
+ }
+
+ # in this case, we are not selecting days but 6-hourly intervals (day d corresponds to intervals 4d-3 to 4d, as in the loop below):
+ if(subdaily == TRUE) days.month.full <- sort(c(days.month.full*4-3, days.month.full*4-2, days.month.full*4-1, days.month.full*4))
+
+ # select only the days of the cluster series inside the target month p:
+ cluster.sequence <- my.cluster$cluster[days.month.full]
+
+ } else {
+
+ cluster.sequence <- my.cluster$cluster
+ }
+
+ # convert the cluster sequence from 6-hourly to daily:
+ if(subdaily){
+ type <- c()
+
+ for(day in 1:(length(cluster.sequence)/4)){
+ #day <- ceiling(hour/4)
+ hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)]
+
+ t1 <- length(which(hourly == 1))
+ t2 <- length(which(hourly == 2))
+ t3 <- length(which(hourly == 3))
+ t4 <- length(which(hourly == 4))
+ tt <- c(t1,t2,t3,t4)
+
+ # if all the 4 time steps belong to the same regime, assign it to this day:
+ if(length(unique(hourly)) == 1) type[day] <- hourly[1]
+
+ # if there are two different regimes, check if one has a higher frequency:
+ if(length(unique(hourly)) == 2){
+ if(any(tt == 3)){ # if 3 of the 4 time intervals belong to the same weather regime, assign this day to it
+ type[day] <- which(tt == 3)
+ } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrarily assign the regime occurring at 12:00 of that day
+ type[day] <- hourly[3]
+ }
+ }
+
+ # if there are three different regimes, assign the day to the only regime with 2 time steps in that day:
+ if(length(unique(hourly)) == 3) type[day] <- which(tt == 2)
+
+ # if there are four different regimes (a very rare event!), assign the day to the regime occurring at 12:00 of that day:
+ if(length(unique(hourly)) == 4) type[day] <- hourly[3]
+
+ } # close for on day
+
+ } # close if on subdaily
+
+
+ if(sequences){ # it works only for rean data!!!
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+ # warning: the first 4 and the last 4 days are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+ wrdiff <- diff(my.cluster$cluster)
+
+ # a day is discarded when two regime changes fall inside the 5-day window around it, i.e. when its spell is shorter than 5 days:
+ sequ <- rep(FALSE, n.days.period[[p]])
+ for(i in 5:(n.days.period[[p]]-5)){
+ sequ[i] <- TRUE
+ if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+ if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+ } # close for loop on i
+
+ # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+ cluster.sequence <- my.cluster$cluster * sequ
+ rm(wrdiff, sequ)
+ }
+
+
+ # indices of the days assigned to each regime (if sequences=TRUE, only days inside spells of at least 5 days are kept):
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ # yearly frequency of each regime:
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+
+ mod.subdaily <- ifelse(subdaily,4,1)
+ np <- n.days.in.a.period(p,2001)*mod.subdaily
+
+ for(y in year.start:year.end){
+ # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+ }
+
+ # convert to frequencies in %:
+ wr1y <- wr1y/np
+ wr2y <- wr2y/np
+ wr3y <- wr3y/np
+ wr4y <- wr4y/np
+
+ gc()
+
+
+ # measure regime anomalies:
+ pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+ if(running.cluster == TRUE){
+ pslmat.new <- pslmat
+ pslmat <- pslmat.new[days.month.full,,]
+ gc()
+ }
+
+ pslwr1 <- pslmat[wr1,,, drop=F]
+ pslwr2 <- pslmat[wr2,,, drop=F]
+ pslwr3 <- pslmat[wr3,,, drop=F]
+ pslwr4 <- pslmat[wr4,,, drop=F]
+
+ pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+ pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+ pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+ pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+ # spatial mean for each day, saved for the measure of the FairRPSS:
+ pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+ pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+ pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+ pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+
+ # save all the data necessary to redraw the graphs when we know the right regime:
+ save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1, wr2, wr3, wr4, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+ # immediately save the plots of the ERA-Interim monthly regime anomalies with the running cluster, instead of loading them in the next script:
+ EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+ if(psl == "g500"){
+ my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR
+ my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+ values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+ }
+
+ if(psl == "psl"){
+ my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR
+ my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+ values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+ }
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+ map1 <- pslwr1mean
+ map2 <- pslwr2mean
+ map3 <- pslwr3mean
+ map4 <- pslwr4mean
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+ dev.off()
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ dev.off()
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ dev.off()
+ png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+ dev.off()
+
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ gc()
+
+ }
+
+ if(fields.name == ECMWF_S4.name){
+ cat("Computing regime anomalies and frequencies.
+
+ #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+ cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+ wr1 <- which(cluster.sequence == 1)
+ wr2 <- which(cluster.sequence == 2)
+ wr3 <- which(cluster.sequence == 3)
+ wr4 <- which(cluster.sequence == 4)
+
+ wr1y <- apply(my.cluster.array == 1, 2, sum)
+ wr2y <- apply(my.cluster.array == 2, 2, sum)
+ wr3y <- apply(my.cluster.array == 3, 2, sum)
+ wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+ # convert counts to relative frequencies:
+ wr1y <- wr1y/(num.leadtimes*n.members) # in this case, num.leadtimes is the number of days of one month of the cluster time series
+ wr2y <- wr2y/(num.leadtimes*n.members)
+ wr3y <- wr3y/(num.leadtimes*n.members)
+ wr4y <- wr4y/(num.leadtimes*n.members)
+
+ # same as above, but measuring the maximum frequency among all the members:
+ wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+ wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+ wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+ wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+ wr1yMax <- wr1yMax/num.leadtimes
+ wr2yMax <- wr2yMax/num.leadtimes
+ wr3yMax <- wr3yMax/num.leadtimes
+ wr4yMax <- wr4yMax/num.leadtimes
+
+ # ...and the minimum frequency among all the members:
+ wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+ wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+ wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+ wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+ wr1yMin <- wr1yMin/num.leadtimes
+ wr2yMin <- wr2yMin/num.leadtimes
+ wr3yMin <- wr3yMin/num.leadtimes
+ wr4yMin <- wr4yMin/num.leadtimes
+
+ cat("Computing regime anomalies and frequencies. Please wait......\n")
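+
+ # Illustration only (invented dimensions and values, never executed): how the apply()
+ # calls above count, for each start year, the days assigned to a given cluster.
+ if(FALSE){
+ cl.arr <- array(sample(1:4, 30*5*2, replace=TRUE), c(30,5,2)) # leadtime x startyear x member
+ apply(cl.arr == 1, 2, sum) # days in cluster 1 per start year, summed over the members
+ apply(cl.arr == 1, c(2,3), sum) # the same, kept separate per member (used for the max/min above)
+ }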
+
+ # in this case, we must use psl.melted instead of psleuFull; both are already in anomalies
+ # (psl.melted depends on the startdate and leadtime, psleuFull does not):
+ gc()
+ pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+ #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+ gc()
+
+ #for(a in 1:2){
+ # for(b in 1:3){
+ # for(c in 1:4){
+ # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+ # }
+ # }
+ #}
+
+ pslwr1 <- pslmat[wr1,,, drop=F]
+ pslwr2 <- pslmat[wr2,,, drop=F]
+ pslwr3 <- pslmat[wr3,,, drop=F]
+ pslwr4 <- pslmat[wr4,,, drop=F]
+
+ pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+ pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+ pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+ pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+ # spatial mean for each day, saved for the measure of the FairRPSS:
+ pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+ pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+ pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+ pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+ rm(pslmat)
+ gc()
+
+ # save all the data necessary to draw the graphs (but not the impact maps):
+ save(my.cluster, cluster.sequence, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, wr1, wr2, wr3, wr4, n.leadtimes, num.leadtimes, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+ rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+ rm(pslwr1,pslwr2,pslwr3,pslwr4)
+ gc()
+
+ }
+
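+ # Illustration only (toy values, never executed): with the years varying fastest, as
+ # described below, seq(i, N, n.years) extracts every day of the i-th year from the
+ # interleaved series.
+ if(FALSE){
+ n.years.toy <- 3
+ cs.toy <- c(11,21,31, 12,22,32, 13,23,33) # year1day1, year2day1, year3day1, year1day2, ...
+ cs.toy[seq(2, length(cs.toy), n.years.toy)] # 21 22 23: all the days of year 2
+ }
+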
+ # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+ if(fields.name == ECMWF_monthly.name){
+ my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+ #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+ days.period <- period.length <- list()
+ days.period[[p]] <- c() # initialise the p-th element before appending to it below
+ for(startdate in my.startdates.period){
+ days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+ }
+ period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+ wr1y <- wr2y <- wr3y <- wr4y <- c()
+ for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+ wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+ wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+ wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+ }
+ }
+
+ cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+
+
+
+
+
diff --git a/old/weather_regimes_v5.R b/old/weather_regimes_v5.R
new file mode 100644
index 0000000000000000000000000000000000000000..e3cfe185e4133aa83c211a641a42ffd90bd7679e
--- /dev/null
+++ b/old/weather_regimes_v5.R
@@ -0,0 +1,288 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps
+
+rean <- ERAint # choose one of the two reanalyses above
+rean.name <- ERAint.name
+
+psl <- "g500" # pressure variable to use from the chosen reanalysis
+var.name <- "tas" #"sfcWind" # name of the 'predictand' variable of the chosen reanalysis
+
+year.start <- 1979
+year.end <- 2013
+
+variance.explained <- 0.8 # minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+period = 13 # (winter) # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because the geopotential grid has only positive longitude values!
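+
+# Illustration only (hypothetical helper, never executed): the 0-360 longitude convention
+# assumed by the comment above; 80W becomes the positive value 280 used for lon.min.
+if(FALSE){
+to.lon360 <- function(lon) lon %% 360 # map longitudes in [-180,180] to [0,360)
+to.lon360(-80) # 280
+}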
+
+#############################################################################################################################
+
+# load only 1 day of the geopotential to detect the minimum and maximum lat and lon values that identify the North Atlantic:
+domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the meridian of Greenwich!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+lat <- domain$lat[pos.lat] # lat of the chosen area only (smaller than the whole spatial domain)
+lon <- domain$lon[pos.lon] # lon of the chosen area only
+if(domain$lat[pos.lat.min] >= domain$lat[pos.lat.max]) stop("lat.min cannot be higher than lat.max")
+
+# Load psl data:
+psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+}
+
+# Load var data:
+vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon))
+
+for (y in year.start:year.end){
+ var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData"))
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData"))
+
+# each time you want to change the variables 'period' and/or 'var', start from here:
+
+period=16
+
+days.period <- NA
+for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+days.period <- days.period[-1] # remove the NA at the beginning, introduced only to make the command above work
+n.days.period <- length(days.period)
+
+pslPeriod <- psleuFull[days.period,,] # select only the days in the chosen period (e.g. winter)
+
+pslmat <- pslPeriod
+
+dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
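+
+# Illustration only (toy array, never executed): R reshapes in column-major order, so with
+# 'day' as the first dimension each row of pslmat is one day and each column one grid point.
+if(FALSE){
+a <- array(1:24, c(4,2,3)) # 4 "days", 2 "lats", 3 "lons"
+m <- a; dim(m) <- c(4, 2*3) # same reshape as above
+all(m[2,] == as.vector(a[2,,])) # TRUE: row 2 holds all the grid points of day 2
+}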
+
+my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of every 9
+
+pslcut <- pslmat[,my.seq]
+
+my.PCA <- princomp(pslcut,cor=FALSE)
+
+tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # cumulative explained variance, to check how many PCs to keep
+
+n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # index of the first PC whose cumulative variance exceeds the chosen threshold (80%)
+
+my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], 4) # 4 is the number of clusters; the n.pcs leading PCs explain ~80% of the variance
+
+rm(pslmat)
+gc()
+
+
+
+
+varPeriod <- vareuFull[days.period,,] # select only the var data of the chosen period
+
+varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+varPeriodAnom <- varPeriod - varPeriodClim2
+rm(varPeriod, varPeriodClim2)
+gc()
+
+wr1 <- which(my.cluster$cluster==1)
+wr2 <- which(my.cluster$cluster==2)
+wr3 <- which(my.cluster$cluster==3)
+wr4 <- which(my.cluster$cluster==4)
+
+period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years
+
+wr1y <- wr2y <- wr3y <- wr4y <- c()
+for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 1))
+ wr2y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 2))
+ wr3y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 3))
+ wr4y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 4))
+}
+
+# convert counts to relative frequencies:
+wr1y <- wr1y/period.length
+wr2y <- wr2y/period.length
+wr3y <- wr3y/period.length
+wr4y <- wr4y/period.length
+
+varPeriodAnom1 <- varPeriodAnom[wr1,,]
+varPeriodAnom2 <- varPeriodAnom[wr2,,]
+varPeriodAnom3 <- varPeriodAnom[wr3,,]
+varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+
+pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4)
+gc()
+
+pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+
+pslPeriodAnom <- pslPeriod - pslPeriodClim2
+rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+gc()
+
+pslwr1 <- pslPeriodAnom[wr1,,]
+pslwr2 <- pslPeriodAnom[wr2,,]
+pslwr3 <- pslPeriodAnom[wr3,,]
+pslwr4 <- pslPeriodAnom[wr4,,]
+
+pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+# Mean psl maps:
+
+#my.brks <- c(48000, seq(48501,57100,1), 60000) # breaks for the mean field of a WR
+#my.brks2 <- c(48000, seq(48500,58000,500)) # breaks for the contour lines
+my.brks <- c(-3000,-2000:2000,3000) # breaks for the mean anomaly of a WR
+my.brks2 <- c(-3000,seq(-2000,2000,100),3000) # breaks for the contour lines
+#my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+# First check each cluster to find out to which regime it corresponds:
+#PlotEquiMap(pslwr1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr1mean), brks2=my.brks2)
+#PlotEquiMap(pslwr2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr2mean), brks2=my.brks2)
+#PlotEquiMap(pslwr3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr3mean), brks2=my.brks2)
+#PlotEquiMap(pslwr4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr4mean), brks2=my.brks2)
+
+# Assign to each cluster the name of its regime:
+regime1.name <- "Cluster #1"
+regime2.name <- "Cluster #2"
+regime3.name <- "Cluster #3"
+regime4.name <- "Cluster #4"
+
+# Visualize and save the average geopotential associated with each regime:
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr1mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime1.name, contours=t(pslwr1mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime2.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr2mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime2.name, contours=t(pslwr2mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime3.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr3mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime3.name, contours=t(pslwr3mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime4.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr4mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime4.name, contours=t(pslwr4mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2);dev.off()
+
+# Visualize and save the average anomalies of the predictand variable associated with each regime:
+my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # breaks for the mean anomaly of a WR
+#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+my.cols.var <- 
colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title1 <- paste(regime1.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom1mean, lon, lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, drawleg=F, dots=t(pvalue1 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime2.name,"_anomalies.png"), width=1000, height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title2 <- paste(regime2.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, drawleg=F, dots=t(pvalue2 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime3.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title3 <- paste(regime3.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, drawleg=F, dots=t(pvalue3 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime4.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title4 <- paste(regime4.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, drawleg=F, dots=t(pvalue4 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+
+## # Visualize and save the frequency series associated with each regime:
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime1.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr1y, year.start, year.end, title=paste("Frequency of", regime1.name,"Regime"));dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime2.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr2y, year.start, year.end, title=paste("Frequency of", regime2.name,"Regime"));dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime3.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr3y, year.start, year.end, title=paste("Frequency of", regime3.name,"Regime"));dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime4.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr4y, year.start, year.end, title=paste("Frequency of", regime4.name, "Regime"));dev.off()
+
+
+## # save as .pdf instead of .png:
+## pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO-.pdf"),width=1000,height=700)
+## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+## PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO-",drawleg=F)
+## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+## dev.off()
+
+## 
pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Blocking.pdf"), width=1000, height=700)
+## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+## PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F)
+## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+## dev.off()
+
+## pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Atlantic_Ridge.pdf"),width=1000,height=700)
+## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+## PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F)
+## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+## dev.off()
+
+## pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO+.pdf"),width=1000,height=700)
+## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+## PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F)
+## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2)
+## dev.off()
+
+## rm(varPeriodeuFull,varPeriodeu,varPeriod,var)
diff --git a/old/weather_regimes_v6.R b/old/weather_regimes_v6.R
new file mode 100644
index 0000000000000000000000000000000000000000..c6d168d57db989da5fd08a4c64b2369c070db45a
--- /dev/null
+++ b/old/weather_regimes_v6.R
@@ -0,0 +1,326 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = 
'/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+ERAint.name <- "ERA-Interim"
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps
+
+rean <- ERAint # choose one of the two reanalyses above
+rean.name <- ERAint.name
+
+psl <- "g500" # pressure variable to use from the chosen reanalysis
+var.name <- "tas" #"sfcWind" # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- "Temperature" # full name of the 'predictand' variable, used in the titles of the graphs
+
+year.start <- 1979
+year.end <- 2013
+
+variance.explained <- 0.8 # minimum fraction of variance explained by the retained PCs (typically 0.8, i.e. 80%)
+period = 13 # (winter) # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+# Coordinates of the box enclosing the study region (the North Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because the geopotential grid has only positive longitude values!
+
+#############################################################################################################################
+
+# load only 1 day of the geopotential to detect the minimum and maximum lat and lon values that identify the North Atlantic:
+domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the meridian of Greenwich!
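+
+# Illustration only (toy axis and hypothetical helper, never executed): a wrap-safe variant
+# of the pos.lon construction above that also handles a box not crossing Greenwich.
+if(FALSE){
+sel.lon <- function(lons, lon.min, lon.max){
+  if(lon.min > lon.max){ # the box crosses the Greenwich meridian
+    c(which(lons <= lon.max), which(lons >= lon.min))
+  } else { # ordinary box
+    which(lons >= lon.min & lons <= lon.max)
+  }
+}
+sel.lon(seq(0, 357.5, 2.5), 280, 40) # indices wrap around 0 degrees, as for the North Atlantic box
+}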
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+lat <- domain$lat[pos.lat] # lat of the chosen area only (smaller than the whole spatial domain)
+lon <- domain$lon[pos.lon] # lon of the chosen area only
+if(domain$lat[pos.lat.min] >= domain$lat[pos.lat.max]) stop("lat.min cannot be higher than lat.max")
+
+# Load psl data:
+psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+for (y in year.start:year.end){
+ var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+ psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+}
+
+# Load var data:
+vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon))
+
+for (y in year.start:year.end){
+ var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+ latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+ vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+ rm(var)
+ gc()
+}
+
+
+# each time you want to change the variable 'period', start from here:
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData"))
+period=13
+
+days.period <- NA
+for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+days.period <- days.period[-1] # remove the NA at the beginning, introduced only to make the command above work
+n.days.period <- length(days.period)
+
+pslPeriod <- psleuFull[days.period,,] # select only the days in the chosen period (e.g. winter)
+
+pslmat <- pslPeriod
+
+dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
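+
+# Illustration only (invented numbers, never executed): the retained-PC rule used below
+# keeps the first PC at which the cumulative explained variance passes the threshold.
+if(FALSE){
+tot.var.toy <- cumsum(c(0.45, 0.20, 0.12, 0.08, 0.05)) # 0.45 0.65 0.77 0.85 0.90
+head(as.numeric(which(tot.var.toy > 0.8)), 1) # 4: keep the first 4 PCs
+}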
+
+my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of every 9
+
+pslcut <- pslmat[,my.seq]
+
+my.PCA <- princomp(pslcut,cor=FALSE)
+
+tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # cumulative explained variance, to check how many PCs to keep
+
+n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # index of the first PC whose cumulative variance exceeds the chosen threshold (80%)
+
+my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], 4) # 4 is the number of clusters; the n.pcs leading PCs explain ~80% of the variance
+
+rm(pslmat)
+gc()
+
+
+
+
+varPeriod <- vareuFull[days.period,,] # select only the var data of the chosen period
+
+varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+varPeriodAnom <- varPeriod - varPeriodClim2
+rm(varPeriod, varPeriodClim2)
+gc()
+
+wr1 <- which(my.cluster$cluster==1)
+wr2 <- which(my.cluster$cluster==2)
+wr3 <- which(my.cluster$cluster==3)
+wr4 <- which(my.cluster$cluster==4)
+
+period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years
+
+wr1y <- wr2y <- wr3y <- wr4y <- c()
+for(y in year.start:year.end){
+ wr1y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 1))
+ wr2y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 2))
+ wr3y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 3))
+ wr4y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 4))
+}
+
+# convert counts to relative frequencies:
+wr1y <- wr1y/period.length
+wr2y <- wr2y/period.length
+wr3y <- wr3y/period.length
+wr4y <- wr4y/period.length
+
+varPeriodAnom1 <- varPeriodAnom[wr1,,]
+varPeriodAnom2 <- varPeriodAnom[wr2,,]
+varPeriodAnom3 <- varPeriodAnom[wr3,,]
+varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+
+pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4)
+gc()
+
+pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+
+pslPeriodAnom <- pslPeriod - pslPeriodClim2
+rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+gc()
+
+pslwr1 <- pslPeriodAnom[wr1,,]
+pslwr2 <- pslPeriodAnom[wr2,,]
+pslwr3 <- pslPeriodAnom[wr3,,]
+pslwr4 <- pslPeriodAnom[wr4,,]
+
+pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+# Mean psl maps:
+
+#my.brks <- c(48000, seq(48501,57100,1), 60000) # breaks for the mean field of a WR
+#my.brks2 <- c(48000, seq(48500,58000,500)) # breaks for the contour lines
+my.brks <- c(-3000,-1999:2000,3000) # breaks for the mean anomaly of a WR
+my.brks2 <- c(-3000,seq(-2000,2000,100),3000) # breaks for the contour lines
+#my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1)
+my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+# First check each cluster to find out to which regime it corresponds:
+#PlotEquiMap(pslwr1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr1mean), brks2=my.brks2)
+#PlotEquiMap(pslwr2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr2mean), brks2=my.brks2)
+#PlotEquiMap(pslwr3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr3mean), brks2=my.brks2)
+#PlotEquiMap(pslwr4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr4mean), brks2=my.brks2)
+
+# Assign to each cluster the name of its regime:
+regime1.name <- "Cluster #1"
+regime2.name <- "Cluster #2"
+regime3.name <- "Cluster #3"
+regime4.name <- "Cluster #4"
+
+# Visualize and save the average geopotential associated with each regime:
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr1mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime1.name, contours=t(pslwr1mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime2.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr2mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime2.name, contours=t(pslwr2mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime3.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr3mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime3.name, contours=t(pslwr3mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime4.name,"_geop_field.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+PlotEquiMap(pslwr4mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime4.name, contours=t(pslwr4mean), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2);dev.off()
+
+# Visualize and save the average anomalies of the predictand variable associated with each regime:
+my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # breaks for the mean anomaly of a WR
+#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow--red colors
+my.cols.var <- 
colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title1 <- paste(regime1.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom1mean, lon, lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, drawleg=F, dots=t(pvalue1 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime2.name,"_anomalies.png"), width=1000, height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title2 <- paste(regime2.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, drawleg=F, dots=t(pvalue2 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime3.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title3 <- paste(regime3.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, drawleg=F, dots=t(pvalue3 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime4.name,"_anomalies.png"),width=1000,height=700)
+layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE))
+title4 <- paste(regime4.name, "impact on", var.name, "anomalies")
+PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, drawleg=F, dots=t(pvalue4 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off()
+
+
+## # Visualize and save the frequency series associated with each regime:
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime1.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr1y, year.start, year.end, title=paste("Frequency of", regime1.name,"Regime"));dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime2.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr2y, year.start, year.end, title=paste("Frequency of", regime2.name,"Regime"));dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime3.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr3y, year.start, year.end, title=paste("Frequency of", regime3.name,"Regime"));dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime4.name,"_frequency.png"),width=400,height=300)
+barplot.freq(wr4y, year.start, year.end, title=paste("Frequency of", regime4.name, "Regime"));dev.off()
+
+
+# Visualize the composition with the 3 graphs for each single regime: its pressure field, its impact on var and its frequency series:
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_composite.png"),width=1600,height=600)
+layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2))
+#layout.show(8)
+EU <- c(1:54,130:161) # positions of the longitude values covering Europe
+title1 <- paste(regime1.name, "geopotential field")
+PlotEquiMap(pslwr1mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title1, contours=t(pslwr1mean[,EU]), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2)
+title1 <- paste(regime1.name, "impact on", var.name.full)
+PlotEquiMap(varPeriodAnom1mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, drawleg=F, dots=t(pvalue1 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2)
+plot(0, type="n", axes=F)
+plot(0, type="n", axes=F)
+title(paste(regime1.name, "frequency"), cex.main=1.7, line=-1)
+barplot.freq(wr1y, year.start, year.end)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime2.name,"_composite.png"),width=1600,height=600)
+layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2))
+#layout.show(8)
+EU <- c(1:54,130:161) # positions of the longitude values covering Europe
+title2 <- paste(regime2.name, "geopotential field")
+PlotEquiMap(pslwr2mean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title2, contours=t(pslwr2mean[,EU]), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2)
+title2 <- paste(regime2.name, "impact on", var.name.full)
+PlotEquiMap(varPeriodAnom2mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, drawleg=F, dots=t(pvalue2 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2)
+plot(0, type="n", axes=F);plot(0, type="n", axes=F)
+title(paste(regime2.name, "frequency"), cex.main=1.7, line=-1)
+barplot.freq(wr2y, year.start, year.end)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime3.name,"_composite.png"),width=1600,height=600)
+layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2))
+#layout.show(7)
+EU <- c(1:54,130:161) # positions of the longitude values covering Europe
+title3 <- paste(regime3.name, "geopotential field")
+PlotEquiMap(pslwr3mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title3, contours=t(pslwr3mean[,EU]), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2)
+title3 <- paste(regime3.name, "impact on", var.name.full)
+PlotEquiMap(varPeriodAnom3mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, drawleg=F, dots=t(pvalue3 < 0.05))
+ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2)
+plot(0, type="n", axes=F); plot(0, type="n", axes=F)
+title(paste(regime3.name, "frequency"), cex.main=1.7, line=-1)
+barplot.freq(wr3y, year.start, year.end)
+dev.off()
+
+png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime4.name,"_composite.png"),width=1600,height=600)
+layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2))
+#layout.show(8)
+EU <- c(1:54,130:161) # positions of the longitude values covering Europe
+title4 <- paste(regime4.name, "geopotential field")
+PlotEquiMap(pslwr4mean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title4, contours=t(pslwr4mean[,EU]), brks2=my.brks2, drawleg=F)
+ColorBar(my.brks, cols=my.cols, 
vert=FALSE, subsampleg=100, cex=1.2) +title4 <- paste(regime4.name, "impact on", var.name.full) +PlotEquiMap(varPeriodAnom4mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, drawleg=F, dots=t(pvalue4 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2) +plot(0, type="n", axes=F);plot(0, type="n", axes=F) +title(paste(regime4.name, "frequency"), cex.main=1.7, line=-1) +barplot.freq(wr4y, year.start, year.end) +dev.off() + + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData")) diff --git a/old/weather_regimes_v6.R~ b/old/weather_regimes_v6.R~ new file mode 100644 index 0000000000000000000000000000000000000000..5c9fee7bf73b1b599366aa859007e9c71d32afa4 --- /dev/null +++ b/old/weather_regimes_v6.R~ @@ -0,0 +1,289 @@ + +library(s2dverification) # for the function Load() +library(abind) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two above reanalysis +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis +var.name <- "tas" #"sfcWind" # name of the 'predictand' variable of the chosen reanalysis + +year.start <- 1979 +year.end <- 2013 + +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) +period = 13 # (winter) # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 70 +lat.min <- 30 +lon.max <- 40 +lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitud! + +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study region cross the meridian of Greenwhich! 
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain) +lon <- domain$lon[pos.lon] # lon of chosen area only +if(domain$lat[pos.lat.min] >= domain$lat[pos.lat.max]) stop("lat.min cannot be higher than lat.max") + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + + +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData")) + +# each time you want to change variable 'period' and/or 'var', start from here: +period=16 + +days.period <- NA +for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) +days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command +n.days.period <- length(days.period) + +pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + +pslmat <- pslPeriod + +dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
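+# The lines below subsample the grid, run a PCA on the day-by-gridpoint matrix and cluster
+# the leading PC scores with k-means. A self-contained sketch of the same selection
+# criterion on synthetic data (base R only; the 0.8 threshold mirrors variance.explained;
+# all values are illustrative), wrapped in if(FALSE) so it is never executed:
+if(FALSE){
+  set.seed(1) # kmeans uses random starting centers, so fix the seed for reproducible clusters
+  X <- matrix(rnorm(360*50), nrow=360) # 360 "days" x 50 "grid points"
+  pca <- princomp(X, cor=FALSE)
+  cumvar <- cumsum(pca$sdev^2) / sum(pca$sdev^2) # cumulative fraction of explained variance
+  npc <- which(cumvar > 0.8)[1] # smallest number of PCs explaining >80% of the variance
+  cl <- kmeans(pca$scores[, 1:npc], centers=4) # 4 clusters = 4 candidate weather regimes
+  table(cl$cluster) # number of days assigned to each cluster
+}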
+ +my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of 9 + +pslcut <- pslmat[,my.seq] + +my.PCA <- princomp(pslcut,cor=FALSE) + +tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance + +n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the PCs that together explain at least 80% of the variance + +my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], 4) # 4 is the number of clusters, n.pcs the number of EOFs explaining ~80% of the variance + +rm(pslmat) +gc() + + + + +varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + +varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) +varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + +varPeriodAnom <- varPeriod - varPeriodClim2 +rm(varPeriod, varPeriodClim2) +gc() + +wr1 <- which(my.cluster$cluster==1) +wr2 <- which(my.cluster$cluster==2) +wr3 <- which(my.cluster$cluster==3) +wr4 <- which(my.cluster$cluster==4) + +period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years + +wr1y <- wr2y <- wr3y <-wr4y <- c() +for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) +} + +# convert to frequencies in %: +wr1y <- wr1y/period.length +wr2y <- wr2y/period.length +wr3y <- wr3y/period.length +wr4y <- wr4y/period.length + +varPeriodAnom1 <- varPeriodAnom[wr1,,] +varPeriodAnom2 <- varPeriodAnom[wr2,,] +varPeriodAnom3 <- varPeriodAnom[wr3,,] +varPeriodAnom4 <- varPeriodAnom[wr4,,] + +varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) +varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) +varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) +varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + +varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) +varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) +varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) +varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + +pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) +gc() + +pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) +pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + +pslPeriodAnom <- pslPeriod - pslPeriodClim2 +rm(pslPeriod, pslPeriodClim, pslPeriodClim2) +gc() + +pslwr1 <- pslPeriodAnom[wr1,,] +pslwr2 <- pslPeriodAnom[wr2,,] +pslwr3 <- pslPeriodAnom[wr3,,] +pslwr4 <- pslPeriodAnom[wr4,,] + +pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) +pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) +pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + +# Mean psl maps: + +#my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR +#my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR +my.brks <- c(-3000,-2000:2000,3000) # % Mean anomaly of a WR +my.brks2 <- c(-3000,seq(-2000,2000,100),3000) # % Mean anomaly of a WR +#my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors +# First check each cluster to find to which regime it corresponds: +#PlotEquiMap(pslwr1mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr1mean), brks2=my.brks2) +#PlotEquiMap(pslwr2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr2mean), brks2=my.brks2) +#PlotEquiMap(pslwr3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr3mean), brks2=my.brks2) +#PlotEquiMap(pslwr4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, contours=t(pslwr4mean), brks2=my.brks2) + +# Assign to each cluster the name of its regime: +regime1.name <- "Cluster #1" +regime2.name <- "Cluster #2" +regime3.name <- "Cluster #3" +regime4.name <- "Cluster #4" + +# Visualize and save the average geopotential associated with each regime: +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_geop_field.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(pslwr1mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime1.name, contours=t(pslwr1mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime2.name,"_geop_field.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(pslwr2mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime2.name, contours=t(pslwr2mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime3.name,"_geop_field.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(pslwr3mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime3.name, contours=t(pslwr3mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2);dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime4.name,"_geop_field.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +PlotEquiMap(pslwr4mean, lon, lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=regime4.name, contours=t(pslwr4mean), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2);dev.off() + +# Visualize and save the average wind anomalies associated with each regime: +my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <-
colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_anomalies.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +title1 <- paste(regime1.name, "impact on", var.name, "anomalies") +PlotEquiMap(varPeriodAnom1mean, lon, lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, drawleg=F, dots=t(pvalue1 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime2.name,"_anomalies.png"), width=1000, height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +title2 <- paste(regime2.name, "impact on", var.name, "anomalies") +PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, drawleg=F, dots=t(pvalue2 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime3.name,"_anomalies.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +title3 <- paste(regime3.name, "impact on", var.name, "anomalies") +PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, drawleg=F, dots=t(pvalue3 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime4.name,"_anomalies.png"),width=1000,height=700) +layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +title4 <- paste(regime4.name, "impact on", var.name, "anomalies") +PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, drawleg=F, dots=t(pvalue4 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=1, cex=1.2);dev.off() + + +# Visualize and save the frequency series associated with each regime: +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime1.name,"_frequency.png"),width=400,height=300) +barplot.freq(wr1y, year.start, year.end, title=paste("Frequency of", regime1.name,"Regime"));dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime2.name,"_frequency.png"),width=400,height=300) +barplot.freq(wr2y, year.start, year.end, title=paste("Frequency of", regime2.name,"Regime"));dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime3.name,"_frequency.png"),width=400,height=300) +barplot.freq(wr3y, year.start, year.end, title=paste("Frequency of", regime3.name,"Regime"));dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_", my.period[period],"_",regime4.name,"_frequency.png"),width=400,height=300) +barplot.freq(wr4y, year.start, year.end, title=paste("Frequency of", regime4.name, "Regime"));dev.off() + + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,"_", my.period[period],".RData")) + +## # save as .pdf instead of .png: +## pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO-.pdf"),width=1000,height=700) +## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +## PlotEquiMap(varPeriodAnom1mean,lon,lat,filled.continents = FALSE, brks=my.brks,
cols=my.cols, toptitle="NAO-",drawleg=F) +## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +## dev.off() + +## pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Blocking.pdf"), width=1000, height=700) +## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +## PlotEquiMap(varPeriodAnom2mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Blocking", drawleg=F) +## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +## dev.off() + +## pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"Atlantic_Ridge.pdf"),width=1000,height=700) +## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +## PlotEquiMap(varPeriodAnom3mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="Atlantic Ridge", drawleg=F) +## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +## dev.off() + +## pdf(file=paste0(mapdir,"/",rean,"_",var.name,"_", my.period[period],"NAO+.pdf"),width=1000,height=700) +## layout(matrix(c(rep(1,9),2), 10, 1, byrow = TRUE)) +## PlotEquiMap(varPeriodAnom4mean,lon,lat,filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="NAO+", drawleg=F) +## ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=1, cex=1.2) +## dev.off() + +## rm(varPeriodeuFull,varPeriodeu,varPeriod,var) diff --git a/old/weather_regimes_v7.R b/old/weather_regimes_v7.R new file mode 100644 index 0000000000000000000000000000000000000000..671af1adb6cf78100caaa912f51cd9cad38b8805 --- /dev/null +++ b/old/weather_regimes_v7.R @@ -0,0 +1,317 @@ + +library(s2dverification) # for the function Load() +library(abind) +library(RColorBrewer) # for brewer.pal(), used below to build the color palettes +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two reanalyses above from which to load the input psl and var data +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis + +var.name <- "tas" #"sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- "Temperature" #"Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- "ºC" #"m/s" # unit of measure (for drawing color scales) + +year.start <- 1979 +year.end <- 2013 + +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 70 +lat.min <- 30 +lon.max <- 40 +lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitude!
+ +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain) +lon <- domain$lon[pos.lon] # lon of chosen area only +if(domain$lat[pos.lat.min] >= domain$lat[pos.lat.max]) stop("lat.min cannot be higher than lat.max") + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + +# each time you want to change only variable 'period', start from here loading the saved data: +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + pslmat <- pslPeriod + + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
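+ # The significance dots on the impact maps below come from a two-sample t-test at each
+ # grid point, comparing the anomalies of all days in the period against the anomalies of
+ # the days assigned to one regime. A minimal sketch with synthetic data (illustrative
+ # sample sizes; in the script the two samples are stacked with abind before the apply),
+ # wrapped in if(FALSE) so it is never executed:
+ if(FALSE){
+   all.days <- rnorm(900) # anomalies of every day of the period at one grid point
+   regime.days <- rnorm(220, mean=0.5) # anomalies of the days assigned to one regime
+   t.test(all.days, regime.days)$p.value # p < 0.05 -> a dot is drawn on the impact map
+ }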
+ + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of 9 + + pslcut <- pslmat[,my.seq] + + my.PCA <- princomp(pslcut,cor=FALSE) + + tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance + + n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the PCs that together explain at least 80% of the variance + + my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], 4) # 4 is the number of clusters, n.pcs the number of EOFs explaining ~80% of the variance + + rm(pslmat) + gc() + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim2) + gc() + + wr1 <- which(my.cluster$cluster==1) + wr2 <- which(my.cluster$cluster==2) + wr3 <- which(my.cluster$cluster==3) + wr4 <- which(my.cluster$cluster==4) + + period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years + + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) + gc() + + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + 
pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # breaks and colors of the geopotential fields: + #my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + regime1.name <- "Cluster #1" + regime2.name <- "Cluster #2" + regime3.name <- "Cluster #3" + regime4.name <- "Cluster #4" + + # when you want to add the right regime name to the clusters: + period=13 + load(file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + if(period==13){ + regime1.name="Atlantic Ridge" + regime2.name="Blocking" + regime3.name="NAO+" + regime4.name="NAO-" + } + if(period==14){ + regime1.name="Blocking" + regime2.name="NAO-" + regime3.name="Atlantic Ridge" + regime4.name="NAO+" + } + if(period==15){ + regime1.name="NAO+" + regime2.name="Blocking" + regime3.name="Atlantic Ridge" + regime4.name="NAO-" + } + if(period==16){ + regime1.name="NAO-" + regime2.name="Blocking" + regime3.name="NAO+" + regime4.name="Atlantic Ridge" + } + + # Visualize the composition with the 3 graphs for each single regime: its pressure fields, its impact on var and its frequency series: + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,".png"),width=1600,height=600) + layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) + #layout.show(8) + EU <- c(1:54,130:161) # positions of the longitude values of Europe only (without the Atlantic Ocean and America) + title1 <- paste(regime1.name, "geopotential height") + PlotEquiMap(pslwr1mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title1, contours=t(pslwr1mean[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) + mtext(side=4," m", cex=1.5) + title1 <- paste(regime1.name, "impact on", var.name.full) + PlotEquiMap(varPeriodAnom1mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, drawleg=F, dots=t(pvalue1[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.2, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=1.5) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") # plot 2 empty graphs + title(paste0(regime1.name, " Frequency (", round(100*mean(wr1y),1), "%)"), cex.main=1.7, line=-1) + barplot.freq(100*wr1y, year.start, year.end,
cex.y=1.1, cex.x=1.1, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.3, line=-1) + dev.off() + + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime2.name,".png"),width=1600,height=600) + layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) + #layout.show(8) + EU <- c(1:54,130:161) # positions of the longitude values of Europe + title2 <- paste(regime2.name, "geopotential height") + PlotEquiMap(pslwr2mean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title2, contours=t(pslwr2mean[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) + mtext(side=4," m", cex=1.5) + title2 <- paste(regime2.name, "impact on", var.name.full) + PlotEquiMap(varPeriodAnom2mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, drawleg=F, dots=t(pvalue2[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.2, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=1.5) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime2.name, " Frequency (", round(100*mean(wr2y),1), "%)"), cex.main=1.7, line=-1) + barplot.freq(100*wr2y, year.start, year.end, cex.y=1.1, cex.x=1.1, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.3, line=-1) + dev.off() + + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime3.name,".png"),width=1600,height=600) + layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) + #layout.show(7) + EU <- c(1:54,130:161) # positions of the longitude values of Europe + title3 <- paste(regime3.name, "geopotential height") + PlotEquiMap(pslwr3mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title3, contours=t(pslwr3mean[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) + mtext(side=4," m", cex=1.5) + title3 <- paste(regime3.name, "impact on", var.name.full) + PlotEquiMap(varPeriodAnom3mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, drawleg=F, dots=t(pvalue3[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.2, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=1.5) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime3.name, " Frequency (", round(100*mean(wr3y),1), "%)"), cex.main=1.7, line=-1) + barplot.freq(100*wr3y, year.start, year.end, cex.y=1.1, cex.x=1.1, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.3, line=-1) + dev.off() + + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime4.name,".png"),width=1600,height=600) + layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) + #layout.show(8) + EU <- c(1:54,130:161) # positions of the longitude values of Europe + title4 <- paste(regime4.name, "geopotential height") + PlotEquiMap(pslwr4mean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title4, contours=t(pslwr4mean[,EU]),
brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) + mtext(side=4," m", cex=1.5) + title4 <- paste(regime4.name, "impact on", var.name.full) + PlotEquiMap(varPeriodAnom4mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, drawleg=F, dots=t(pvalue4[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.2, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=1.5) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime4.name, " Frequency (", round(100*mean(wr4y),1), "%)"), cex.main=1.7, line=-1) + barplot.freq(100*wr4y, year.start, year.end, cex.y=1.1, cex.x=1.1, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.3, line=-1) + dev.off() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,regime1.name,regime2.name,regime3.name,regime4.name,file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close for loop on period + diff --git a/old/weather_regimes_v7.R~ b/old/weather_regimes_v7.R~ new file mode 100644 index 0000000000000000000000000000000000000000..b248bb35e138b4ff9b3db2ea1c0223e0cc84b256 --- /dev/null +++ b/old/weather_regimes_v7.R~ @@ -0,0 +1,292 @@ + +library(s2dverification) # for the function Load() +library(abind) +library(RColorBrewer) # for brewer.pal(), used below to build the color palettes +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two reanalyses above from which to load the input psl and var data +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis + +var.name <- "sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- "10-m Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- "m/s" #"ºC" # unit of measure (for drawing color scales) + +year.start <- 1979 +year.end <- 2013 + +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%) +period = 13 # (winter) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <-
70 +lat.min <- 30 +lon.max <- 40 +lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitude! + +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain) +lon <- domain$lon[pos.lon] # lon of chosen area only +if(domain$lat[pos.lat.min] >= domain$lat[pos.lat.max]) stop("lat.min cannot be higher than lat.max") + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + +# each time you want to change variable 'period', start from here: +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + +days.period <- NA +for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) +days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command +n.days.period <- length(days.period) + +pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + +pslmat <- pslPeriod + +dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
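+# Further below, the yearly regime frequencies are obtained by counting, for each year, how
+# many period days fall in each cluster and dividing by period.length (fixed at the 1999
+# calendar, hence the small leap-year error noted there). A minimal sketch of that counting,
+# assuming a 90-day season and random labels (illustrative only, never executed):
+if(FALSE){
+  season.length <- 90
+  clusters <- sample(1:4, 3*season.length, replace=TRUE) # 3 "years" of daily cluster labels
+  yearly.count <- sapply(0:2, function(i) sum(clusters[i*season.length + (1:season.length)] == 1))
+  yearly.count / season.length # yearly frequency of cluster 1 as a fraction of season days
+}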
+ +my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of 9 + +pslcut <- pslmat[,my.seq] + +my.PCA <- princomp(pslcut,cor=FALSE) + +tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep based on the sum of their explained variance + +n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # select only the PCs that together explain at least 80% of the variance + +my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], 4) # 4 is the number of clusters, n.pcs the number of EOFs explaining ~80% of the variance + +rm(pslmat) +gc() + +varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + +varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) +varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + +varPeriodAnom <- varPeriod - varPeriodClim2 +rm(varPeriod, varPeriodClim2) +gc() + +wr1 <- which(my.cluster$cluster==1) +wr2 <- which(my.cluster$cluster==2) +wr3 <- which(my.cluster$cluster==3) +wr4 <- which(my.cluster$cluster==4) + +period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years + +wr1y <- wr2y <- wr3y <-wr4y <- c() +for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) +} + +# convert to frequencies in %: +wr1y <- wr1y/period.length +wr2y <- wr2y/period.length +wr3y <- wr3y/period.length +wr4y <- wr4y/period.length + +varPeriodAnom1 <- varPeriodAnom[wr1,,] +varPeriodAnom2 <- varPeriodAnom[wr2,,] +varPeriodAnom3 <- varPeriodAnom[wr3,,] +varPeriodAnom4 <- varPeriodAnom[wr4,,] + +varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) +varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) +varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) +varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + +varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) +varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) +varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) +varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + +pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) +rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) +gc() + +pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) +pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + +pslPeriodAnom <- pslPeriod - pslPeriodClim2 +rm(pslPeriod, pslPeriodClim, pslPeriodClim2) +gc() + +pslwr1 <- pslPeriodAnom[wr1,,] +pslwr2 <- pslPeriodAnom[wr2,,] +pslwr3 <- pslPeriodAnom[wr3,,] +pslwr4 <- pslPeriodAnom[wr4,,] + +pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) +pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) +pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + +rm(pslwr1,pslwr2,pslwr3,pslwr4) + +# breaks and colors of the geopotential fields: +#my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR +#my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR +my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR +my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR +#my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) +my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + +# breaks and colors of the impact maps: +my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR +#my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors +my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + +regime1.name <- "Cluster #1" +regime2.name <- "Cluster #2" +regime3.name <- "Cluster #3" +regime4.name <- "Cluster #4" + +#period=13 +load(file=paste0(mapdir,"/weather_regimes_",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + +# Visualize the composition with the 3 graphs for each single regime: its pressure fields, its impact on var and its frequency series: +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime1.name,"_composite.png"),width=1600,height=600) +layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) +#layout.show(8) +EU <- c(1:54,130:161) # positions of the longitude values of Europe only (without the Atlantic Ocean and America) +title1 <- paste(regime1.name, "geopotential height") +PlotEquiMap(pslwr1mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title1, contours=t(pslwr1mean[,EU]), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +mtext(side=4," m", cex=1.5) +title1 <- paste(regime1.name, "impact on", var.name.full) +PlotEquiMap(varPeriodAnom1mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, drawleg=F, dots=t(pvalue1 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=5, cex=1.2) +mtext(side=4,paste0(" ",var.unit), cex=1.5) +plot(0, type="n", axes=F, xlab="", ylab="") +plot(0, type="n", axes=F, xlab="", ylab="") # plot 2 empty graphs +title(paste0(regime1.name, " Frequency (", round(100*mean(wr1y),1), "%)"), cex.main=1.7, line=-1) +barplot.freq(100*wr1y, year.start, year.end, cex.y=1.1, cex.x=1.1, freq.max=80) +plot(0, type="n", axes=F, xlab="", ylab="") +title("Year", cex.main=1.5, line=-1) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime2.name,"_composite.png"),width=1600,height=600) +layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) +#layout.show(8) +EU <- c(1:54,130:161) # positions of the longitude values of Europe +title2 <- paste(regime2.name, "geopotential height") +PlotEquiMap(pslwr2mean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title2, contours=t(pslwr2mean[,EU]), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +mtext(side=4," m", cex=1.5) +title2 <- paste(regime2.name,
"impact on", var.name.full) +PlotEquiMap(varPeriodAnom2mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, drawleg=F, dots=t(pvalue2 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=5, cex=1.2) +mtext(side=4,paste0(" ",var.unit), cex=1.5) +plot(0, type="n", axes=F, xlab="", ylab="") +plot(0, type="n", axes=F, xlab="", ylab="") +title(paste0(regime2.name, " Frequency (", round(100*mean(wr2y),1), "%)"), cex.main=1.7, line=-1) +barplot.freq(100*wr2y, year.start, year.end, cex.y=1.1, cex.x=1.1, freq.max=80) +plot(0, type="n", axes=F, xlab="", ylab="") +title("Year", cex.main=1.5, line=-1) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime3.name,"_composite.png"),width=1600,height=600) +layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) +#layout.show(7) +EU <- c(1:54,130:161) # position of long values of Europe +title3 <- paste(regime3.name, "geopotential height") +PlotEquiMap(pslwr3mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title3, contours=t(pslwr3mean[,EU]), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +mtext(side=4," m", cex=1.5) +title3 <- paste(regime3.name, "impact on", var.name.full) +PlotEquiMap(varPeriodAnom3mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, drawleg=F, dots=t(pvalue3 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=5, cex=1.2) +mtext(side=4,paste0(" ",var.unit), cex=1.5) +plot(0, type="n", axes=F, xlab="", ylab="") +plot(0, type="n", axes=F, xlab="", ylab="") +title(paste0(regime3.name, " Frequency (", round(100*mean(wr3y),1), "%)"), cex.main=1.7, line=-1) +barplot.freq(100*wr3y, year.start, year.end, cex.y=1.1, cex.x=1.1, freq.max=80) +plot(0, type="n", axes=F, xlab="", ylab="") +title("Year", cex.main=1.5, line=-1) +dev.off() + +png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],"_",regime4.name,"_composite.png"),width=1600,height=600) +layout(matrix(c(rep(1,9),2,rep(3,9),4,rep(5,10),6,rep(7,8),8), 10, 4, byrow = FALSE), widths=c(2,2,0.2,2)) +#layout.show(8) +EU <- c(1:54,130:161) # position of long values of Europe +title4 <- paste(regime4.name, "geopotential height") +PlotEquiMap(pslwr4mean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title4, contours=t(pslwr4mean[,EU]), brks2=my.brks2, drawleg=F) +ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.2) +mtext(side=4," m", cex=1.5) +title4 <- paste(regime4.name, "impact on", var.name.full) +PlotEquiMap(varPeriodAnom4mean[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, drawleg=F, dots=t(pvalue4 < 0.05)) +ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=5, cex=1.2) +mtext(side=4,paste0(" ",var.unit), cex=1.5) +plot(0, type="n", axes=F, xlab="", ylab="") +plot(0, type="n", axes=F, xlab="", ylab="") +title(paste0(regime4.name, " Frequency (", round(100*mean(wr4y),1), "%)"), cex.main=1.7, line=-1) +barplot.freq(100*wr4y, year.start, year.end, cex.y=1.1, cex.x=1.1, freq.max=80) +plot(0, type="n", axes=F, xlab="", ylab="") +title("Year", cex.main=1.5, line=-1) +dev.off() + +# save all the data necessary to redraw the graphs when we know the right regime: 
+save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,file=paste0(mapdir,"/weather_regimes_",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + +rm(pvalue1,pvalue2,pvalue3,pvalue4) +rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) +rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) +gc() + +} # close for loop on period + diff --git a/old/weather_regimes_v8.R b/old/weather_regimes_v8.R new file mode 100644 index 0000000000000000000000000000000000000000..ef875063bb3730bbfc4746a50438296c5f2cc482 --- /dev/null +++ b/old/weather_regimes_v8.R @@ -0,0 +1,403 @@ + +library(s2dverification) # for the function Load() +library(abind) +library(RColorBrewer) # for brewer.pal(), used below to build the color palettes +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two reanalyses above from which to load the input psl and var data +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis + +var.name <- "tas" #"sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- "Temperature" #"Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- "ºC" #"m/s" # unit of measure (for drawing color scales) + +year.start <- 1979 +year.end <- 2013 + +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8, i.e. 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 70 +lat.min <- 30 +lon.max <- 40 +lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitude! + +############################################################################################################################# + +# load only 1 day of geopot to detect the minimum and maximum lat and lon values to identify only the North Atlantic +domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) +pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max))) +pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min))) +pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max))) +pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min))) +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else { pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study region crosses the Greenwich meridian!
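+# v8 also weights each pressure field by sqrt(cos(latitude)) before the PCA (see the
+# weighting step inside the period loop below), so that the converging meridians at high
+# latitudes do not make northern grid points dominate the covariances. A base-R sketch of
+# that broadcast (the script itself uses InsertDim from s2dverification; the grid here is
+# illustrative and the block is never executed):
+if(FALSE){
+  lat.demo <- seq(30, 70, 10); nlat <- length(lat.demo); nlon <- 8; ndays <- 5
+  field <- array(rnorm(ndays*nlat*nlon), c(ndays, nlat, nlon))
+  w <- cos(lat.demo*pi/180)^0.5 # one weight per latitude row
+  warr <- aperm(array(w, c(nlat, nlon, ndays)), c(3,1,2)) # replicate to [day, lat, lon]
+  field.weighted <- field * warr
+}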
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon of chosen area only +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area (like lat.min/lat.max, but snapped to actual values of the lat/lon vectors) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +# compute the PCs: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (e.g. winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCs to keep based on the sum of their expl.
variance + n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # select only the PCs that together explain at least 80% of the variance + + my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], 4) # 4 is the number of clusters, n.pcs[period] the number of EOFs explaining ~80% of the variance + + rm(pslcut, pslmat) + gc() +} + +save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + + + +# each time you want to change only variable 'period', start from here loading the saved data: +load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData")) + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command + n.days.period <- length(days.period) + + varPeriod <- vareuFull[days.period,,] # select only var data during the chosen period + + varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T) + varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period) + + varPeriodAnom <- varPeriod - varPeriodClim2 + rm(varPeriod, varPeriodClim2) + gc() + + wr1 <- which(my.cluster[[period]]$cluster==1) + wr2 <- which(my.cluster[[period]]$cluster==2) + wr3 <- which(my.cluster[[period]]$cluster==3) + wr4 <- which(my.cluster[[period]]$cluster==4) + + period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years + + wr1y <- wr2y <- wr3y <-wr4y <- c() + for(y in year.start:year.end){ + wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1)) + wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2)) + wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3)) + wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/period.length + wr2y <- wr2y/period.length + wr3y <- wr3y/period.length + wr4y <- wr4y/period.length + + varPeriodAnom1 <- varPeriodAnom[wr1,,] + varPeriodAnom2 <- varPeriodAnom[wr2,,] + varPeriodAnom3 <- varPeriodAnom[wr3,,] + varPeriodAnom4 <- varPeriodAnom[wr4,,] + + varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T) + varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T) + varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T) + varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T) + + varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) + varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1) + varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1) + varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1) + + pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + pvalue4 <-
apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # breaks and colors of the geopotential fields: + #my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + #regime1.name <- "Cluster #1" + #regime2.name <- "Cluster #2" + #regime3.name <- "Cluster #3" + #regime4.name <- "Cluster #4" + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# when you want to add the right regime name to the clusters: +for(period in 13:16){ + + load(file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + if(var.name == "tas"){ + if(period==13){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==14){ + cluster1.name="Blocking" + cluster2.name="NAO-" + cluster3.name="Atlantic Ridge" + cluster4.name="NAO+" + } + if(period==15){ + cluster1.name="NAO+" + cluster2.name="Blocking" + cluster3.name="Atlantic Ridge" + cluster4.name="NAO-" + } + if(period==16){ + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="Atlantic Ridge" + } + } + if(var.name == "sfcWind"){ + if(period==13){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + 
cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==14){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==15){ + cluster1.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atlantic Ridge" + } + if(period==16){ + cluster1.name="Blocking" + cluster2.name="Atlantic Ridge" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + } + + + orden <- c("NAO+","NAO-","Blocking","Atlantic Ridge") + + # correspondence between regimes to plot and clusters: + regime1 <- which(orden[1] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime2 <- which(orden[2] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime3 <- which(orden[3] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime4 <- which(orden[4] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + + regime1.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime1] + regime2.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime2] + regime3.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime3] + regime4.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime4] + + cluster1 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[1] == orden) + cluster2 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[2] == orden) + cluster3 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[3] == orden) + cluster4 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[4] == orden) + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30),41,4,byrow=TRUE),widths=c(2,2,0.2,2)) + #layout.show(30) + + plot(0,0, axes=F, xlab="", ylab="") + title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",rean.name), cex.main=9, line=-2) + EU <- c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + + title1 <- paste(regime1.name, "geopotential height") + PlotEquiMap(map1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title1, sizetit=1.2, contours=t(map1[,EU]),brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title1 <- paste(regime1.name, "impact on", var.name.full) + PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, sizetit=1.2, drawleg=F, dots=t(sig1[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") # plot 2 empty graphs + title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre1, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title2 <- paste(regime2.name, "geopotential height") + PlotEquiMap(map2[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title2, sizetit=1.2, contours=t(map2[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title2 <- paste(regime2.name, "impact on", var.name.full) + PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, sizetit=1.2, drawleg=F, dots=t(sig2[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre2, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title3 <- paste(regime3.name, "geopotential height") + PlotEquiMap(map3[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title3, sizetit=1.2, contours=t(map3[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title3 <- paste(regime3.name, "impact on", var.name.full) + PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, sizetit=1.2, drawleg=F, dots=t(sig3[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre3, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title4 <- paste(regime4.name, "geopotential height") + PlotEquiMap(map4[,EU], lon[EU], lat, filled.continents=FALSE, 
brks=my.brks, cols=my.cols, toptitle=title4, sizetit=1.2, contours=t(map4[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title4 <- paste(regime4.name, "impact on", var.name.full) + PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, sizetit=1.2, drawleg=F, dots=t(sig4[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre4, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + dev.off() + +} # close for loop on period + diff --git a/old/weather_regimes_v8.R~ b/old/weather_regimes_v8.R~ new file mode 100644 index 0000000000000000000000000000000000000000..363d6ae54144d68215f4c56015ab389ad957c5d1 --- /dev/null +++ b/old/weather_regimes_v8.R~ @@ -0,0 +1,376 @@ + +library(s2dverification) # for the function Load() +library(abind) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two above reanalysis from where to load the input psl and var data +rean.name <- ERAint.name + +psl <- "g500" # pressure variable to use from the chosen reanalysis + +var.name <- "tas" #"sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- "Temperature" #"Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- "ºC" #"m/s" # unit of measure (for drawing color scales) + +year.start <- 1979 +year.end <- 2013 + +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 70 +lat.min <- 30 +lon.max <- 40 +lon.min <- 280 # put a positive number here because the geopotential has only positive values of longitud! 
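# Editorial note: lon.min is set to 280 above because this grid stores longitudes
# in the [0, 360) convention, so a point at 80W appears as 280E. A minimal sketch of
# the conversion between the two conventions (hypothetical helper names, not part of
# the original script):
to.lon360 <- function(lon) lon %% 360                   # [-180, 180] -> [0, 360)
to.lon180 <- function(lon) ((lon + 180) %% 360) - 180   # [0, 360)   -> [-180, 180]
to.lon360(-80)   # 280, the value used above for lon.min
to.lon180(280)   # -80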
+
+#############################################################################################################################
+
+# load only 1 day of the geopotential to detect the minimum and maximum lat and lon values that identify the North Atlantic only
+domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: this only considers the case where the study region crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+lat <- domain$lat[pos.lat] # lat of the chosen area only (it is smaller than the whole spatial domain)
+lon <- domain$lon[pos.lon] # lon of the chosen area only
+if(domain$lat[pos.lat.min] >= domain$lat[pos.lat.max]) stop("lat.min must be lower than lat.max")
+
+# Load psl data:
+psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height
+
+# Load var data:
+vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData"))
+
+# each time you only want to change the variable 'period', restart from here by loading the saved data:
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  days.period <- NA
+  for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+  days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only the days in the chosen period (e.g. winter)
+
+  pslmat <- pslPeriod
+
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # reshape the array into a (days x grid points) matrix
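# Editorial note: the lines below compute a PCA with princomp() and keep the smallest
# number of components whose cumulative explained variance exceeds variance.explained.
# A self-contained toy sketch of that truncation rule (toy data and names, not part of
# the original script):
set.seed(1)
toy <- matrix(rnorm(100*12), nrow=100)                # 100 "days" x 12 "grid points"
toy.pca <- princomp(toy, cor=FALSE)
expl <- cumsum(toy.pca$sdev^2 / sum(toy.pca$sdev^2))  # cumulative explained variance
n.keep <- which(expl > 0.8)[1]                        # first number of PCs above the 80% threshold
scores.kept <- toy.pca$scores[, 1:n.keep]             # these scores would feed the k-means step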
+
+  my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # keep only one grid point out of every nine to speed up the PCA
+
+  pslcut <- pslmat[,my.seq]
+
+  my.PCA <- princomp(pslcut,cor=FALSE)
+
+  tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # cumulative explained variance of the first 50 PCs, to check how many PCs to keep
+
+  n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep only the first PCs that together explain at least the chosen fraction of variance (80%)
+
+  my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], 4) # partition the retained PC scores into 4 clusters (the weather regimes)
+
+  rm(pslmat)
+  gc()
+
+  varPeriod <- vareuFull[days.period,,] # select only the var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim2)
+  gc()
+
+  wr1 <- which(my.cluster$cluster==1)
+  wr2 <- which(my.cluster$cluster==2)
+  wr3 <- which(my.cluster$cluster==3)
+  wr4 <- which(my.cluster$cluster==4)
+
+  period.length <- n.days.in.a.period(period,1999) # using the fixed year 1999 introduces a small error in the winter season because of leap years
+
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+    wr1y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 1))
+    wr2y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 2))
+    wr3y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 3))
+    wr4y[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*period.length+(1:period.length)] == 4))
+  }
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4)
+  gc()
+
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+
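# Editorial note: the p-value maps computed above apply a two-sample t.test() at each
# grid point, comparing the anomalies of all days of the period against the anomalies
# of the days assigned to one regime. A single-point toy sketch of the same test
# (toy numbers, not part of the original script):
set.seed(2)
all.days    <- rnorm(500)              # anomalies of all days of the period at one point
regime.days <- rnorm(60, mean=0.5)     # anomalies of the days assigned to one regime
t.test(all.days, regime.days)$p.value  # a small p-value: the regime composite differs from the period mean
# Note that in the script the two samples overlap (the regime days are also part of the
# full period), so the resulting maps are best read as a descriptive significance screen.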
pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # breaks and colors of the geopotential fields: + #my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + #regime1.name <- "Cluster #1" + #regime2.name <- "Cluster #2" + #regime3.name <- "Cluster #3" + #regime4.name <- "Cluster #4" + + # when you want to add the right regime name to the clusters: + period <- 16 + load(file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + if(var.name == "tas"){ + if(period==13){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==14){ + cluster1.name="Blocking" + cluster2.name="NAO-" + cluster3.name="Atlantic Ridge" + cluster4.name="NAO+" + } + if(period==15){ + cluster1.name="NAO+" + cluster2.name="Blocking" + cluster3.name="Atlantic Ridge" + cluster4.name="NAO-" + } + if(period==16){ + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="Atlantic Ridge" + } + } + if(var.name == "sfcWind"){ + if(period==13){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==14){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==15){ + cluster1.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atlantic Ridge" + } + if(period==16){ + cluster1.name="Blocking" + cluster2.name="Atlantic Ridge" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + } + + + orden <- c("NAO+","NAO-","Blocking","Atlantic Ridge") + + # correspondence between regimes to plot and clusters: + regime1 <- which(orden[1] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime2 <- which(orden[2] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime3 <- which(orden[3] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime4 <- which(orden[4] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + + regime1.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime1] + regime2.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime2] + regime3.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime3] + regime4.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime4] + + cluster1 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[1] == orden) + 
cluster2 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[2] == orden) + cluster3 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[3] == orden) + cluster4 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[4] == orden) + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30),41,4,byrow=TRUE),widths=c(2,2,0.2,2)) + #layout.show(30) + + plot(0,0, axes=F, xlab="", ylab="") + title(paste("Weather Regimes for",my.period[period],"season (1979-2013). Source:",rean.name), cex.main=9, line=-2) + EU <- c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + + title1 <- paste(regime1.name, "geopotential height") + PlotEquiMap(map1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title1, sizetit=1.2, contours=t(map1[,EU]),brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title1 <- paste(regime1.name, "impact on", var.name.full) + PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, sizetit=1.2, drawleg=F, dots=t(sig1[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") # plot 2 empty graphs + title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre1, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title2 <- paste(regime2.name, "geopotential height") + PlotEquiMap(map2[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title2, sizetit=1.2, contours=t(map2[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title2 <- paste(regime2.name, "impact on", var.name.full) + PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, sizetit=1.2, drawleg=F, dots=t(sig2[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, 
my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre2, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title3 <- paste(regime3.name, "geopotential height") + PlotEquiMap(map3[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title3, sizetit=1.2, contours=t(map3[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title3 <- paste(regime3.name, "impact on", var.name.full) + PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, sizetit=1.2, drawleg=F, dots=t(sig3[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre3, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title4 <- paste(regime4.name, "geopotential height") + PlotEquiMap(map4[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title4, sizetit=1.2, contours=t(map4[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title4 <- paste(regime4.name, "impact on", var.name.full) + PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, sizetit=1.2, drawleg=F, dots=t(sig4[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre4, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + dev.off() + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,regime1.name,regime2.name,regime3.name,regime4.name,file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close for loop on period + diff --git a/old/weather_regimes_v9.R b/old/weather_regimes_v9.R new file mode 100644 index 0000000000000000000000000000000000000000..a82456ede0ea9be90a22fc70a616a0f1ba8325be --- /dev/null +++ 
b/old/weather_regimes_v9.R @@ -0,0 +1,425 @@
+
+library(s2dverification) # for the function Load()
+library(abind)
+source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions
+
+# available reanalyses for the geopotential (g500) or the geopotential height (z500 = g500/9.81):
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+ERAint.name <- "ERA-Interim"
+JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55.name <- "JRA-55"
+
+workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point
+mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with the seasonal and yearly maps
+
+rean <- ERAint # choose one of the two reanalyses above from which to load the input psl and var data
+rean.name <- ERAint.name
+
+psl <- "g500" # pressure variable to use from the chosen reanalysis
+
+var.name <- "tas" #"sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- "Temperature" #"Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- "ºC" #"m/s" # unit of measure (for drawing color scales)
+
+year.start <- 1979
+year.end <- 2013
+
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because the geopotential grid has only positive values of longitude!
+
+#############################################################################################################################
+
+# load only 1 day of the geopotential to detect the minimum and maximum lat and lon values that identify the North Atlantic only
+domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: this only considers the case where the study region crosses the Greenwich meridian!
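# Editorial note: the four pos.* lookups above find the grid index closest to each
# requested coordinate. The same lookup can be written more compactly with which.min();
# a small sketch (hypothetical helper name, not part of the original script):
nearest.index <- function(grid, target) which.min(abs(grid - target))
grid.lat <- seq(90, -90, by=-0.75)   # a toy equal-angle latitude grid
nearest.index(grid.lat, 70)          # does the same job as the pos.lat.max line above
# Unlike the which(... == min(...)) form, which.min() always returns a single index,
# even when two grid points are equally close to the target.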
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+lat <- domain$lat[pos.lat] # lat of the chosen area only (it is smaller than the whole spatial domain loaded by the data)
+lon <- domain$lon[pos.lon] # lon of the chosen area only
+lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but their values correspond to actual values in the lat vector)
+lat.max.area <- domain$lat[pos.lat.max]
+lon.min.area <- domain$lon[pos.lon.min]
+lon.max.area <- domain$lon[pos.lon.max]
+if(lat.min >= lat.max) stop("lat.min must be lower than lat.max")
+
+# Load psl data:
+psleuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area)
+  psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height
+
+# compute the PCs:
+my.PCA <- list()
+my.cluster <- list()
+tot.variance <- list()
+n.pcs <- c()
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  days.period <- NA
+  for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+  days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only the days in the chosen period (e.g. winter)
+
+  # weight the pressure fields based on latitude:
+  lat.weighted <- cos(lat*pi/180)^0.5
+  lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+  pslPeriod.weighted <- pslPeriod * lat.weighted.array
+  rm(lat.weighted.array)
+  gc()
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # reshape the array into a (days x grid points) matrix
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # keep only one grid point out of every nine to speed up the PCA
+  pslcut <- pslmat[,my.seq]
+
+  my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+  tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # cumulative explained variance of the first 50 PCs, to check how many PCs to keep
+  n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # keep only the first PCs that together explain at least the chosen fraction of variance (80%)
+
+  my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], 4) # partition the retained PC scores into 4 clusters (the weather regimes)
+
+  rm(pslcut, pslmat)
+  gc()
+}
+
+# Load var data:
+vareuFull <- array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon))
+
+for (y in year.start:year.end){
+  var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+              latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max])
+  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  rm(var)
+  gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData"))
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  days.period <- NA
+  for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+  days.period <- days.period[-1] # remove the NA at the beginning that was introduced only to be able to execute the above command
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only the var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim2)
+  gc()
+
+  # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+  # warning: the first and last few days of the period are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+  wrdiff <- diff(my.cluster[[period]]$cluster)
+
+  sequ <- rep(FALSE, n.days.period)
+  for(i in 5:(n.days.period-5)){
+    sequ[i] <- TRUE
+    if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+    if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+    if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+    if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+    if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+    if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+    if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+    if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+    if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+    if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+    if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+    if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+  } # close for loop on i
+
+  # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+  cluster.sequence <- my.cluster[[period]]$cluster * sequ
+
+  rm(wrdiff, sequ)
+  gc()
+
+  #wr1 <- which(my.cluster[[period]]$cluster==1)
+  #wr2 <- which(my.cluster[[period]]$cluster==2)
+  #wr3 <- which(my.cluster[[period]]$cluster==3)
+  #wr4 <- which(my.cluster[[period]]$cluster==4)
+
+  # replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime:
+  wr1 <- which(cluster.sequence ==
1)
+  wr2 <- which(cluster.sequence == 2)
+  wr3 <- which(cluster.sequence == 3)
+  wr4 <- which(cluster.sequence == 4)
+
+  period.length <- n.days.in.a.period(period,1999) # using the fixed year 1999 introduces a small error in the winter season because of leap years
+
+  # yearly frequency of each regime:
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+    #wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1))
+    #wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2))
+    #wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3))
+    #wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4))
+    wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 1))
+    wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 2))
+    wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 3))
+    wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*period.length+(1:period.length)] == 4))
+  }
+
+  # convert to frequencies in %:
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1)
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue4 <- apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4)
+  gc()
+
+  pslPeriod <- psleuFull[days.period,,]
+  pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T)
+  pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period)
+
+  pslPeriodAnom <- pslPeriod - pslPeriodClim2
+  rm(pslPeriod, pslPeriodClim, pslPeriodClim2)
+  gc()
+
+  pslwr1 <- pslPeriodAnom[wr1,,]
+  pslwr2 <- pslPeriodAnom[wr2,,]
+  pslwr3 <- pslPeriodAnom[wr3,,]
+  pslwr4 <- pslPeriodAnom[wr4,,]
+  rm(pslPeriodAnom)
+  gc()
+
+  pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T)
+  pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T)
+  pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T)
+  pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T)
+
+  rm(pslwr1,pslwr2,pslwr3,pslwr4)
+
+  # breaks and colors of the geopotential fields:
+  #my.brks <- c(48000, seq(48501,57100,1),
60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + #regime1.name <- "Cluster #1" + #regime2.name <- "Cluster #2" + #regime3.name <- "Cluster #3" + #regime4.name <- "Cluster #4" + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + +# run it twice: once, with ordering <- FALSE to look only at the cartography and find visually the right regime names of the four clusters; +# and the second time, to save the ordered cartography: +ordering <- TRUE + +# choose an order to give to the cartography, from top to bottom: +orden <- c("NAO+","NAO-","Blocking","Atlantic Ridge") + +regime1.name <- orden[1] +regime2.name <- orden[2] +regime3.name <- orden[3] +regime4.name <- orden[4] + +for(period in 13:16){ + load(file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + if(period == 13){ # Winter + cluster3.name="NAO-" + cluster4.name="Atlantic Ridge" + cluster2.name="NAO+" + cluster1.name="Blocking" + } + if(period == 14){ + cluster4.name="Atlantic Ridge" + cluster2.name="NAO-" + cluster1.name="Blocking" + cluster3.name="NAO+" + } + if(period == 15){ + cluster1.name="NAO+" + cluster4.name="NAO-" + cluster2.name="Blocking" + cluster3.name="Atlantic Ridge" + } + if(period == 16){ + cluster3.name="Atlantic Ridge" + cluster1.name="Blocking" + cluster4.name="NAO-" + cluster2.name="NAO+" + } + + # assign to each cluster its regime: + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), 
varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],".png"),width=3000,height=3700) + + #layout(matrix(c(1,1,1,1,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9, + # 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16, + # 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23, + # 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30),41,4,byrow=TRUE),widths=c(2,2,0.2,2)) + #layout.show(30) + layout(matrix(c(1,1,1,1,1,1,1,1,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9,31,32,6,33, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16,34,35,6,36, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23,37,38,6,39, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30),45,4,byrow=TRUE),widths=c(2,2,0.2,2), heights=c(rep(0.2,12),0.2,rep(0.2,10),0.2,rep(0.2,10),0.2,rep(0.2,11))) + + layout.show(39) + + plot(0,0, axes=F, xlab="", ylab="") + title(paste("Weather Regimes for",my.period[period],"season (1979-2013). Source:",rean.name), cex.main=9, line=-2) + EU <- c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + + #title1 <- paste(regime1.name, "geopotential height") + title(paste(regime1.name, "geopotential height"), cex.main=2) + PlotEquiMap(map1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle="", sizetit=1.2, contours=t(map1[,EU]),brks2=my.brks2, drawleg=F) + + + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title1 <- paste(regime1.name, "impact on", var.name.full) + PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, sizetit=1.2, drawleg=F, dots=t(sig1[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") # plot 2 empty graphs + title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre1, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=60) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title2 <- paste(regime2.name, "geopotential height") + PlotEquiMap(map2[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title2, sizetit=1.2, contours=t(map2[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title2 <- paste(regime2.name, "impact on", var.name.full) + PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, sizetit=1.2, drawleg=F, dots=t(sig2[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime2.name, " Frequency (", 
round(100*mean(fre2),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre2, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=60) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title3 <- paste(regime3.name, "geopotential height") + PlotEquiMap(map3[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title3, sizetit=1.2, contours=t(map3[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title3 <- paste(regime3.name, "impact on", var.name.full) + PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, sizetit=1.2, drawleg=F, dots=t(sig3[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre3, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=60) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title4 <- paste(regime4.name, "geopotential height") + PlotEquiMap(map4[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title4, sizetit=1.2, contours=t(map4[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title4 <- paste(regime4.name, "impact on", var.name.full) + PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, sizetit=1.2, drawleg=F, dots=t(sig4[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre4, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=60) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + dev.off() + +} # close for loop on 'period' + + diff --git a/old/weather_regimes_v9.R~ b/old/weather_regimes_v9.R~ new file mode 100644 index 0000000000000000000000000000000000000000..ef875063bb3730bbfc4746a50438296c5f2cc482 --- /dev/null +++ b/old/weather_regimes_v9.R~ @@ -0,0 +1,403 @@ + +library(s2dverification) # for the function Load() +library(abind) +source('/home/Earth/ncortesi/Downloads/scripts/Rfunctions.R') # for the calendar functions + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81): +ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') + +ERAint.name <- "ERA-Interim" +JRA55 <- list(path = '/esnas/reconstructions/jma/jra-55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +JRA55.name <- "JRA-55" + +workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir with the input files with the WT classifications for each MSLP grid point +mapdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes_maps" # output dir with seasonal and yearly maps + +rean <- ERAint # choose one of the two above reanalysis from where to load the input 
psl and var data
+rean.name <- ERAint.name
+
+psl <- "g500" # pressure variable to use from the chosen reanalysis
+
+var.name <- "tas" #"sfcWind" #"tas" # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- "Temperature" #"Wind Speed" #"Temperature" # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- "ºC" #"m/s" # unit of measure (for drawing color scales)
+
+year.start <- 1979
+year.end <- 2013
+
+variance.explained <- 0.8 # the user can set here the minimum fraction of variance explained by the PCs (typically 0.8, i.e. 80%)
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 70
+lat.min <- 30
+lon.max <- 40
+lon.min <- 280 # put a positive number here because the geopotential grid has only positive values of longitude!
+
+#############################################################################################################################
+
+# load only 1 day of the geopotential to detect the minimum and maximum lat and lon values that identify the North Atlantic only
+domain <- Load(var = psl, exp = NULL, obs = list(rean), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+pos.lat.max <- which(abs(domain$lat-lat.max)==min(abs(domain$lat-lat.max)))
+pos.lat.min <- which(abs(domain$lat-lat.min)==min(abs(domain$lat-lat.min)))
+pos.lon.max <- which(abs(domain$lon-lon.max)==min(abs(domain$lon-lon.max)))
+pos.lon.min <- which(abs(domain$lon-lon.min)==min(abs(domain$lon-lon.min)))
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware: this only considers the case where the study region crosses the Greenwich meridian!
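# Editorial note: before the PCA the script below multiplies each daily field by
# sqrt(cos(latitude)), so that the crowded high-latitude points of the equal-angle grid
# do not dominate the covariances (each product of two weighted values is then
# proportional to cos(lat), i.e. to grid-cell area). A toy sketch of the weighting
# (toy values, not part of the original script):
lat.toy  <- c(30, 50, 70)               # three latitudes of a toy grid
w        <- sqrt(cos(lat.toy*pi/180))   # the same weights used in the script
field    <- matrix(1, nrow=3, ncol=4)   # toy field: 3 latitudes x 4 longitudes
weighted <- field * w                   # w is recycled down the rows (latitudes)
round(weighted, 2)                      # the high-latitude row is damped the most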
+n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) +lat <- domain$lat[pos.lat] # lat of chosen area only (it is smaller than the whole spatial domain loaded by the data) +lon <- domain$lon[pos.lon] # lon of chosen area only +lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +lat.max.area <- domain$lat[pos.lat.max] +lon.min.area <- domain$lon[pos.lon.min] +lon.max.area <- domain$lon[pos.lon.max] +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +# Load psl data: +psleuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end), n.pos.lat, n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = psl, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = lat.min.area, latmax = lat.max.area, lonmin = lon.min.area, lonmax = lon.max.area) + psleuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height + +# Load var data: +vareuFull <-array(NA,c(n.days.in.a.yearly.period(year.start,year.end),n.pos.lat,n.pos.lon)) + +for (y in year.start:year.end){ + var <- Load(var = var.name, exp = NULL, obs = list(rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + latmin = domain$lat[pos.lat.min], latmax = domain$lat[pos.lat.max], lonmin = domain$lon[pos.lon.min], lonmax = domain$lon[pos.lon.max]) + vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + rm(var) + gc() +} + +# compute the PCs: +my.PCA <- list() +my.cluster <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year + + days.period <- NA + for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period)) + days.period <- days.period[-1] # remove the NA at the begin that was introduced only to be able to execure the above command + n.days.period <- length(days.period) + + pslPeriod <- psleuFull[days.period,,] # select only days in the chosen period (i.e: winter) + + # weight the pressure fields based on latitude: + lat.weighted <- cos(lat*pi/180)^0.5 + lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period) + pslPeriod.weighted <- pslPeriod * lat.weighted.array + + pslmat <- pslPeriod.weighted + dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert array in a matrix! + rm(pslPeriod, pslPeriod.weighted) + gc() + + my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 point of 9 + pslcut <- pslmat[,my.seq] + + my.PCA[[period]] <- princomp(pslcut,cor=FALSE) + tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # check how many PCAs to keep basing on the sum of their expl. 
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  days.period <- NA
+  for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+  days.period <- days.period[-1] # remove the NA at the beginning, introduced only to be able to execute the above command
+  n.days.period <- length(days.period)
+
+  pslPeriod <- psleuFull[days.period,,] # select only the days in the chosen period (i.e: winter)
+
+  # weight the pressure fields based on latitude:
+  lat.weighted <- cos(lat*pi/180)^0.5
+  lat.weighted.array <- InsertDim(InsertDim(lat.weighted,2,n.pos.lon),1,n.days.period)
+  pslPeriod.weighted <- pslPeriod * lat.weighted.array
+
+  pslmat <- pslPeriod.weighted
+  dim(pslmat) <- c(n.days.period, n.pos.lat*n.pos.lon) # convert the array into a matrix!
+  rm(pslPeriod, pslPeriod.weighted)
+  gc()
+
+  my.seq <- seq(1, n.pos.lat*n.pos.lon, 9) # select only 1 grid point out of every 9
+  pslcut <- pslmat[,my.seq]
+
+  my.PCA[[period]] <- princomp(pslcut,cor=FALSE)
+  tot.variance[[period]] <- head(cumsum(my.PCA[[period]]$sdev^2/sum(my.PCA[[period]]$sdev^2)),50) # cumulative explained variance of the first 50 PCs, to check how many PCs to keep
+  n.pcs[period] <- head(as.numeric(which(tot.variance[[period]] > variance.explained)),1) # keep only the first PCs that together explain at least the chosen fraction (here 80%) of variance
+
+  my.cluster[[period]] <- kmeans(my.PCA[[period]]$scores[,1:n.pcs[period]], 4) # 4 is the number of clusters; only the n.pcs[period] leading PCs (~80% of variance) are used
+
+  rm(pslcut, pslmat)
+  gc()
+}
+
+save.image(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData"))
+
+
+
+# each time you want to change only the variable 'period', start from here, loading the saved data:
+load(file=paste0(workdir,"/weather_regimes_",rean.name,"_",var.name,"_",year.start,"-",year.end,".RData"))
+
+for(period in 13:16){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+  days.period <- NA
+  for(y in year.start:year.end) days.period <- c(days.period, n.days.in.a.future.year(year.start, y) + pos.period(y,period))
+  days.period <- days.period[-1] # remove the NA at the beginning, introduced only to be able to execute the above command
+  n.days.period <- length(days.period)
+
+  varPeriod <- vareuFull[days.period,,] # select only the var data during the chosen period
+
+  varPeriodClim <- apply(varPeriod, c(2,3), mean, na.rm=T)
+  varPeriodClim2 <- InsertDim(varPeriodClim, 1, n.days.period)
+
+  varPeriodAnom <- varPeriod - varPeriodClim2
+  rm(varPeriod, varPeriodClim2)
+  gc()
+
+  wr1 <- which(my.cluster[[period]]$cluster==1)
+  wr2 <- which(my.cluster[[period]]$cluster==2)
+  wr3 <- which(my.cluster[[period]]$cluster==3)
+  wr4 <- which(my.cluster[[period]]$cluster==4)
+
+  period.length <- n.days.in.a.period(period,1999) # using year 1999 introduces a small error in the winter season because of leap years
+
+  wr1y <- wr2y <- wr3y <- wr4y <- c()
+  for(y in year.start:year.end){
+    wr1y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 1))
+    wr2y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 2))
+    wr3y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 3))
+    wr4y[y-year.start+1] <- length(which(my.cluster[[period]]$cluster[(y-year.start)*period.length+(1:period.length)] == 4))
+  }
+
+  # convert counts to frequencies (fractions; they are multiplied by 100 when plotted):
+  wr1y <- wr1y/period.length
+  wr2y <- wr2y/period.length
+  wr3y <- wr3y/period.length
+  wr4y <- wr4y/period.length
+
+  varPeriodAnom1 <- varPeriodAnom[wr1,,]
+  varPeriodAnom2 <- varPeriodAnom[wr2,,]
+  varPeriodAnom3 <- varPeriodAnom[wr3,,]
+  varPeriodAnom4 <- varPeriodAnom[wr4,,]
+
+  varPeriodAnom1mean <- apply(varPeriodAnom1,c(2,3),mean,na.rm=T)
+  varPeriodAnom2mean <- apply(varPeriodAnom2,c(2,3),mean,na.rm=T)
+  varPeriodAnom3mean <- apply(varPeriodAnom3,c(2,3),mean,na.rm=T)
+  varPeriodAnom4mean <- apply(varPeriodAnom4,c(2,3),mean,na.rm=T)
+
+  varPeriodAnomBoth1 <- abind(varPeriodAnom, varPeriodAnom1, along = 1) # abind() requires the 'abind' package
+  varPeriodAnomBoth2 <- abind(varPeriodAnom, varPeriodAnom2, along = 1)
+  varPeriodAnomBoth3 <- abind(varPeriodAnom, varPeriodAnom3, along = 1)
+  varPeriodAnomBoth4 <- abind(varPeriodAnom, varPeriodAnom4, along = 1)
+
+  pvalue1 <- apply(varPeriodAnomBoth1, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue2 <- apply(varPeriodAnomBoth2, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue3 <- apply(varPeriodAnomBoth3, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value)
+  pvalue4 <-
apply(varPeriodAnomBoth4, c(2,3), function(x) t.test(x[1:n.days.period],x[(n.days.period+1):length(x)])$p.value) + rm(varPeriodAnom, varPeriodAnomBoth1, varPeriodAnomBoth2, varPeriodAnomBoth3, varPeriodAnomBoth4) + gc() + + pslPeriod <- psleuFull[days.period,,] + pslPeriodClim <- apply(pslPeriod, c(2,3), mean, na.rm=T) + pslPeriodClim2 <- InsertDim(pslPeriodClim, 1, n.days.period) + + pslPeriodAnom <- pslPeriod - pslPeriodClim2 + rm(pslPeriod, pslPeriodClim, pslPeriodClim2) + gc() + + pslwr1 <- pslPeriodAnom[wr1,,] + pslwr2 <- pslPeriodAnom[wr2,,] + pslwr3 <- pslPeriodAnom[wr3,,] + pslwr4 <- pslPeriodAnom[wr4,,] + + pslwr1mean <- apply(pslwr1,c(2,3),mean,na.rm=T) + pslwr2mean <- apply(pslwr2,c(2,3),mean,na.rm=T) + pslwr3mean <- apply(pslwr3,c(2,3),mean,na.rm=T) + pslwr4mean <- apply(pslwr4,c(2,3),mean,na.rm=T) + + rm(pslwr1,pslwr2,pslwr3,pslwr4) + + # breaks and colors of the geopotential fields: + #my.brks <- c(48000, seq(48501,57100,1), 60000) # % Mean anomaly of a WR + #my.brks2 <- c(48000, seq(48500,58000,500)) # % Mean anomaly of a WR + my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + + # breaks and colors of the impact maps: + my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.8,2.8,0.2),seq(3,10,1),20) # % Mean anomaly of a WR + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + #regime1.name <- "Cluster #1" + #regime2.name <- "Cluster #2" + #regime3.name <- "Cluster #3" + #regime4.name <- "Cluster #4" + + # save all the data necessary to redraw the graphs when we know the right regime: + save(workdir,rean.name,var.name,var.name.full,var.unit,year.start,year.end,period,my.period,lon,lat,my.brks,my.brks2,my.brks.var,my.cols,my.cols.var,pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean, varPeriodAnom1mean, varPeriodAnom2mean, varPeriodAnom3mean,varPeriodAnom4mean,wr1y,wr2y,wr3y,wr4y,pvalue1,pvalue2,pvalue3,pvalue4,file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(varPeriodAnom1mean,varPeriodAnom2mean,varPeriodAnom3mean,varPeriodAnom4mean) + gc() + +} # close the for loop on 'period' + + +# when you want to add the right regime name to the clusters: +for(period in 13:16){ + + load(file=paste0(workdir,"/",rean.name,"_",var.name,"_",my.period[period],"_mapdata.RData")) + + if(var.name == "tas"){ + if(period==13){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==14){ + cluster1.name="Blocking" + cluster2.name="NAO-" + cluster3.name="Atlantic Ridge" + cluster4.name="NAO+" + } + if(period==15){ + cluster1.name="NAO+" + cluster2.name="Blocking" + cluster3.name="Atlantic Ridge" + cluster4.name="NAO-" + } + if(period==16){ + cluster1.name="NAO-" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="Atlantic Ridge" + } + } + if(var.name == "sfcWind"){ + if(period==13){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + 
cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==14){ + cluster1.name="Atlantic Ridge" + cluster2.name="Blocking" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + if(period==15){ + cluster1.name="NAO+" + cluster2.name="NAO-" + cluster3.name="Blocking" + cluster4.name="Atlantic Ridge" + } + if(period==16){ + cluster1.name="Blocking" + cluster2.name="Atlantic Ridge" + cluster3.name="NAO+" + cluster4.name="NAO-" + } + } + + + orden <- c("NAO+","NAO-","Blocking","Atlantic Ridge") + + # correspondence between regimes to plot and clusters: + regime1 <- which(orden[1] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime2 <- which(orden[2] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime3 <- which(orden[3] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + regime4 <- which(orden[4] == c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)) + + regime1.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime1] + regime2.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime2] + regime3.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime3] + regime4.name <- c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[regime4] + + cluster1 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[1] == orden) + cluster2 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[2] == orden) + cluster3 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[3] == orden) + cluster4 <- which(c(cluster1.name,cluster2.name,cluster3.name,cluster4.name)[4] == orden) + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + png(filename=paste0(mapdir,"/",rean.name,"_",var.name,"_",my.period[period],".png"),width=3000,height=3700) + + layout(matrix(c(1,1,1,1,2,4,6,7,rep(c(2,4,6,8),8),3,5,6,9, + 10,12,6,14,rep(c(10,12,6,15),8),11,13,6,16, + 17,19,6,21,rep(c(17,19,6,22),8),18,20,6,23, + 24,26,6,28,rep(c(24,26,6,29),8),25,27,6,30),41,4,byrow=TRUE),widths=c(2,2,0.2,2)) + #layout.show(30) + + plot(0,0, axes=F, xlab="", ylab="") + title(paste("Weather Regimes for",my.period[period],"season (1979-2013). 
Source:",rean.name), cex.main=9, line=-2) + EU <- c(1:54,130:161) # position of long values of Europe only (without the Atlantic Sea and America) + + title1 <- paste(regime1.name, "geopotential height") + PlotEquiMap(map1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks, cols=my.cols, toptitle=title1, sizetit=1.2, contours=t(map1[,EU]),brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title1 <- paste(regime1.name, "impact on", var.name.full) + PlotEquiMap(imp1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title1, sizetit=1.2, drawleg=F, dots=t(sig1[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + plot(0, type="n", axes=F, xlab="", ylab="") # plot 2 empty graphs + title(paste0(regime1.name, " Frequency (", round(100*mean(fre1),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre1, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title2 <- paste(regime2.name, "geopotential height") + PlotEquiMap(map2[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title2, sizetit=1.2, contours=t(map2[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title2 <- paste(regime2.name, "impact on", var.name.full) + PlotEquiMap(imp2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title2, sizetit=1.2, drawleg=F, dots=t(sig2[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime2.name, " Frequency (", round(100*mean(fre2),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre2, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title3 <- paste(regime3.name, "geopotential height") + PlotEquiMap(map3[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, toptitle=title3, sizetit=1.2, contours=t(map3[,EU]), brks2=my.brks2, drawleg=F) + ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5) + mtext(side=4," m", cex=2) + title3 <- paste(regime3.name, "impact on", var.name.full) + PlotEquiMap(imp3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title3, sizetit=1.2, drawleg=F, dots=t(sig3[,EU] < 0.05)) + ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20)) + mtext(side=4,paste0(" ",var.unit), cex=2) + plot(0, type="n", axes=F, xlab="", ylab="") + title(paste0(regime3.name, " Frequency (", round(100*mean(fre3),1), "%)"), cex.main=2, line=-1) + barplot.freq(100*fre3, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80) + plot(0, type="n", axes=F, xlab="", ylab="") + title("Year", cex.main=1.5, line=-1) + + title4 <- paste(regime4.name, "geopotential height") + PlotEquiMap(map4[,EU], lon[EU], lat, filled.continents=FALSE, 
brks=my.brks, cols=my.cols, toptitle=title4, sizetit=1.2, contours=t(map4[,EU]), brks2=my.brks2, drawleg=F)
+ ColorBar(my.brks, cols=my.cols, vert=FALSE, subsampleg=100, cex=1.5)
+ mtext(side=4," m", cex=2)
+ title4 <- paste(regime4.name, "impact on", var.name.full)
+ PlotEquiMap(imp4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, toptitle=title4, sizetit=1.2, drawleg=F, dots=t(sig4[,EU] < 0.05))
+ ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, subsampleg=10, cex=1.5, my.ticks=c(0,10,20,30,40,50,60,70,80), my.labels=c(-20,-10,-6,-3,0,3,6,10,20))
+ mtext(side=4,paste0(" ",var.unit), cex=2)
+ plot(0, type="n", axes=F, xlab="", ylab="")
+ title(paste0(regime4.name, " Frequency (", round(100*mean(fre4),1), "%)"), cex.main=2, line=-1)
+ barplot.freq(100*fre4, year.start, year.end, cex.y=1.5, cex.x=1.5, freq.max=80)
+ plot(0, type="n", axes=F, xlab="", ylab="")
+ title("Year", cex.main=1.5, line=-1)
+ dev.off()
+
+} # close the for loop on 'period'
+
diff --git a/palettes/rgbhex.csv b/palettes/rgbhex.csv
new file mode 100644
index 0000000000000000000000000000000000000000..7fb1ca44b4a715a70e8658e978350659ba79461f
--- /dev/null
+++ b/palettes/rgbhex.csv
@@ -0,0 +1,20 @@
+#0C046E
+#1914BE
+#2341F7
+#2F55FB
+#3E64FF
+#528CFF
+#64AAFF
+#82C8FF
+#A0DCFF
+#B4F0FF
+#FFFBAF
+#FFDD9A
+#FFBF87
+#FFA173
+#FF7055
+#FE6346
+#F7403B
+#E92D36
+#C80F1E
+#A50519
diff --git a/slurm/.RData b/slurm/.RData
new file mode 100644
index 0000000000000000000000000000000000000000..ddcfb6e3487c2b4709234d613a4426eecd0a49a1
Binary files /dev/null and b/slurm/.RData differ
diff --git a/slurm/.directory b/slurm/.directory
new file mode 100644
index 0000000000000000000000000000000000000000..5bd515931fe0db1bba299aa6fe17e502bf09119f
--- /dev/null
+++ b/slurm/.directory
@@ -0,0 +1,4 @@
+[Dolphin]
+Timestamp=2016,8,11,9,59,47
+Version=3
+ViewMode=2
diff --git a/slurm/diagnostic.R b/slurm/diagnostic.R
new file mode 100644
index 0000000000000000000000000000000000000000..cb059535cd2f882de6fbcf0fee339b512da4e796
--- /dev/null
+++ b/slurm/diagnostic.R
@@ -0,0 +1,77 @@
+##############################################################
+# In your script, before performing the analysis, the range  #
+# of values of its parameter(s) is specified somewhere       #
+# before the main analysis:                                  #
+##############################################################
+
+start_date <- 1:12
+lead_time <- 1:5
+
+...
+
+###############################################################
+# Insert these new lines to link your script with the job     #
+# file. They have to be inserted BEFORE the main loop where   #
+# you repeat the same analysis many times varying the         #
+# parameter(s) defined above (in this example, start_date and #
+# lead_time):                                                 #
+###############################################################
+
+script.arg <- as.integer(commandArgs(TRUE))
+
+if(length(script.arg) > 0) {
+  start_date <- script.arg[1]
+  lead_time <- script.arg[2]
+}
+
+###############################################################
+# In case your script is written in Python, you have to       #
+# introduce these lines instead (note that Python identifiers #
+# cannot contain dots, and sys.argv[0] is the script name):   #
+###############################################################
+
+import sys
+script_arg = [int(a) for a in sys.argv[1:]]
+
+if len(script_arg) > 0 :
+    start_date = script_arg[0]
+    lead_time = script_arg[1]
+
+
+###############################################################
+# If your script is in Fortran, C or another language, you    #
+# need to adapt the syntax of the previous lines to the       #
+# language used.                                              #
+###############################################################
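+###############################################################
+# Example (illustrative values): once the lines above are in  #
+# place, a single parameter combination can be run from the   #
+# shell with, e.g.:                                           #
+#                                                             #
+#   Rscript diagnostic.R 3 2   # start_date = 3, lead_time = 2#
+###############################################################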
+
+###############################################################
+# Here you perform the main analysis (the one which takes the #
+# major part of the computing time) many times, cycling over  #
+# one parameter (or a few parameters) inside this loop until  #
+# the analysis is finished. When you link this script to the  #
+# job file, you'll be able to split this analysis into        #
+# multiple ones, one for each different value of the looping  #
+# parameter(s), and to assign each value to a different job,  #
+# running all jobs (or as many as possible) on our cluster.   #
+###############################################################
+
+for(sd in start_date){
+  for(lt in lead_time){
+
+    # your analysis here
+    # ......
+
+
+    ###########################################################
+    # When you save the outputs of the analysis, save them in #
+    # one file for each different value of the looping        #
+    # parameter(s); if not, each job will overwrite the       #
+    # results of the previous jobs!                           #
+    ###########################################################
+
+    save(output_1, output_2, ..., output_N, file=paste0(work.dir, "/my_analysis_start_date_", sd, "_lead_time_", lt, ".RData"))
+
+  }
+}
+
+
+
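+# A sketch (with hypothetical parameter ranges, reusing the 'work.dir' path above) of
+# how the per-job output files saved above could be gathered back once all jobs finish:
+# results <- list()
+# for(sd in 1:12){
+#   for(lt in 1:5){
+#     f <- paste0(work.dir, "/my_analysis_start_date_", sd, "_lead_time_", lt, ".RData")
+#     if(file.exists(f)) results[[paste(sd, lt, sep = "_")]] <- mget(load(f))
+#   }
+# }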
diff --git a/slurm/diags.com b/slurm/diags.com
new file mode 100644
index 0000000000000000000000000000000000000000..7dd629233ba15f474a1612b4822ed7005758c069
--- /dev/null
+++ b/slurm/diags.com
@@ -0,0 +1,22 @@
+[DIAGNOSTICS]
+SCRATCH_DIR = /scratch/Earth/$USER
+DATA_DIR = /esnas:/esarchive
+CON_FILES = /esnas/autosubmit/con_files/
+#DIAGS = moc mocarea,30,40,1000,2000,glob
+DIAGS = moc areamoc
+
+FREQUENCY = mon
+
+[EXPERIMENT]
+INSTITUTE = BSC
+MODEL = EC-EARTH3
+MODEL_VERSION = Ec3.2_O1L75
+EXPID = t011
+STARTDATES = 19900101
+MEMBERS = 0
+CHUNK_SIZE = 1
+CHUNKS = 2
+
+[ALIAS]
+OHC = ohc,glob,0,1,10
+AREAMOC = mocarea,30,40,1000,2000,glob
diff --git a/slurm/old/diagnostic_cluster.R b/slurm/old/diagnostic_cluster.R
new file mode 100644
index 0000000000000000000000000000000000000000..176fb8eb2c9602b4feac26dca55f5620269dc80c
--- /dev/null
+++ b/slurm/old/diagnostic_cluster.R
@@ -0,0 +1,19 @@
+library(s2dverification)
+
+chunk <- as.integer(commandArgs(TRUE)[1])
+
+ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+
+domain <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+n.lat <- length(domain$lat)
+n.lon <- length(domain$lon)
+
+data <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=ERAint)), '19950101', storefreq = 'daily', leadtimemax = 3, output = 'lonlat', latmin = domain$lat[chunk+1], latmax = domain$lat[chunk], nprocs=1)$obs
+data <- data[,,,,1,]
+
+n.lat <- 1
+my.RMS <- array(NA, c(n.lat, n.lon))
+
+# it is mandatory to save the output variable with the name 'var', inside a file named 'output_<chunk number>.RData':
+var <- RMS(data,data+1)
+save(var, file=paste0("/scratch/Earth/ncortesi/output_",chunk,".RData"))
diff --git a/slurm/old/diagnostic_cluster.job b/slurm/old/diagnostic_cluster.job
new file mode 100644
index 0000000000000000000000000000000000000000..6a59e9c1a921dae8320b26ba375be1897c36b275
--- /dev/null
+++ b/slurm/old/diagnostic_cluster.job
@@ -0,0 +1,28 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -J diagnostic
+#SBATCH -o diagnostic.out
+#SBATCH -e diagnostic.err
+
+# set the maximum execution time of the diagnostic:
+#SBATCH -t 12:00:00
+
+# Set the name of your script to run:
+diagnostic="../weather_regimes_v35.R"
+
+# Jobs to run:
+firstJob=1
+lastJob=12
+
+# Set the maximum number of jobs:
+nCores=4
+
+# run a process (thread) in background for each job:
+for ARG in $(seq $firstJob $lastJob); do
+  Rscript $diagnostic $ARG &
+  nThreads=$(($nThreads+1))
+  if [ "$nThreads" -ge $nCores ]; then
+     wait # wait until the first $nCores chunks have finished before executing the next $nCores
+     nThreads=0
+  fi
+done
diff --git a/slurm/old/diagnostic_cluster_v2.job b/slurm/old/diagnostic_cluster_v2.job
new file mode 100644
index 0000000000000000000000000000000000000000..191a8c554b831320ea3be3f78e894fe565f5e7c8
--- /dev/null
+++ b/slurm/old/diagnostic_cluster_v2.job
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+#SBATCH -J diagnostic
+#SBATCH -o diagnostic.out
+#SBATCH -e diagnostic.err
+
+# Set the total number of processors (cores) to allocate to the
+# parallel job. Maximum is 8 cores for Moore, 12 for Amdahl
+# and 20 for Gustafson. Slurm will choose automatically
+# to which of the three clusters to assign the job:
+
+#SBATCH -n 10
+
+# Set the total computation time of the parallel job.
+# Time max is 12 hours (syntax: hours:minutes:seconds):
+
+#SBATCH -t 12:00:00
+
+# Set the name of your script to run:
+# if no path is provided, it is assumed to be in the
+# directory where the sbatch command is executed:
+
+diagnostic="../weather_regimes_v35.R"
+
+# First and last values of each of the three variables
+# of the parallel job. Values must be integers. Each value
+# between the first and last ones (including the extremes)
+# will be assigned to a different parallel job. If your
+# job has less than three variables, you can disable one or two
+# of them by setting them to -999 (both varXmin and varXmax):
+
+var1min=8
+var1max=12
+
+var2min=0
+var2max=6
+
+var3min=-999
+var3max=-999
+
+# Set the maximum number of jobs to run in parallel:
+# it should be equal to the number of cores allocated for SLURM
+# above at line #SBATCH -n X, unless the user wants to allocate
+# more cores to run the parallel job in amdahl or gustafson:
+
+nCores=6
+
+# In case the jobs have a high memory peak, you can introduce
+# a time shift in the running of the following job, to shift
+# the execution of the parallel jobs by a few seconds/minutes,
+# so the memory peak is not reached at the same time by the jobs
+# and consequently more memory is available, for example to run
+# more jobs than normal. Job shift is set below in seconds. Set
Set +# it to 0 if you don't want to introduce a job shift: + +shiftJob=600 + +# the following lines are used to run the parallel job: + +arg1=$(seq $var1min $var1max) +arg2=$(seq $var2min $var2max) +arg3=$(seq $var3min $var3max) + +if [ "$var1min" -eq -999 ] && [ "$var1max" -eq -999 ]; then arg1=""; fi +if [ "$var2min" -eq -999 ] && [ "$var2max" -eq -999 ]; then arg2=""; fi +if [ "$var3min" -eq -999 ] && [ "$var3max" -eq -999 ]; then arg3=""; fi + +# run a process (thread) in background for each job: +for seq1 in $(seq $var1min $var1max ); do + for seq2 in $(seq $var2min $var2max); do + for seq3 in $(seq $var3min $var3max); do + + Rscript $diagnostic $arg1 $arg2 $arg3 & + sleep $shiftJob + + nThreads=$(($nThreads+1)) + if [ "$nThreads" -ge $nCores ]; then + wait # wait until the first $nCores chunks have finished before executing the next $nCores + nThreads=0 + fi + done + done +done diff --git a/slurm/old/empty_sbatch.sh b/slurm/old/empty_sbatch.sh new file mode 100644 index 0000000000000000000000000000000000000000..2e11413ded8f553ffcf264656453c4625dad9d7e --- /dev/null +++ b/slurm/old/empty_sbatch.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +Rscript Weather_regimes_v34.R 12 0 + diff --git a/slurm/old/load_netcdf.R b/slurm/old/load_netcdf.R new file mode 100644 index 0000000000000000000000000000000000000000..4d2b0cf30dc326269a535328b7db17f59a030bdf --- /dev/null +++ b/slurm/old/load_netcdf.R @@ -0,0 +1,6 @@ +library(s2dverification) + +# Carga un fichero NetCDF-3 de 1GB desde esnas: +time <- system.time(data.hindcast <- Load(var='tp', exp = list(list(path="/esnas/exp/ecmwf/system4_m1/daily_mean/prlr_f6h/prlr_$YEAR$$MONTH$.nc")), obs=NULL,sdates='20070501', nleadtime=157, output='lonlat', nprocs=1)) + +write.table(time, file=paste0("load_netcdf_total_time_",round(time,2),"_seconds.txt")) diff --git a/slurm/old/load_netcdf.job b/slurm/old/load_netcdf.job new file mode 100644 index 0000000000000000000000000000000000000000..f3c603e0f0c35c41541e9e76cf0bbd87ec90a048 --- /dev/null +++ b/slurm/old/load_netcdf.job @@ -0,0 +1,72 @@ +#!/bin/bash + +#SBATCH -J parallel +#SBATCH -o parallel-%J.out +#SBATCH -e parallel-%J.err + +############################################################# +# The line below specify to assign the job to one of the # +# nodes available (currently: moore, amdahl and gustafson) # +# if you don't have any preference, you can comment it: # +# You can check if a node is full with: # +# squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D # +# %.15R %.5C %.8m" # +# which also shows how much memory and cores a job uses # +############################################################# + +#SBATCH -w amdahl + +############################################################# +# Set the maximum computation time of the parallel job. # +# It have to be higher than the estimated computation # +# time of the parallel job! (syntax HH:MM:SS): # +# (Time max is 78 hours, or 12 h for interactive sessions) # +############################################################# + +#SBATCH -t 78:00:00 + +############################################################# +# Allocate the desired quantity of RAM (in MB) for each # +# processor (core). This value corresponds to the peak RAM # +# utilized by your jobs during their sequential execution: # +############################################################# + +#SBATCH --mem-per-cpu 15000 + +############################################################# +# Allocate a number of processors (cores) to this job. 
# +# it is also equal to the maximum number of jobs running at # +# the same time; the jobs exceeding this number will start # +# only when the previous jobs have finished computing. # +# The maximum number of cores that can be allocated depends # +# on the cluster: it is 8 cores for Moore, 12 for Amdahl # +# and 20 for Gustafson. Slurm will choose automatically to # +# which of the three clusters assign the job. However, this # +# number also cannot be higher than the total cluster's RAM # +# divided by the RAM allocated to one core (rounded down). # +# Total cluster RAM is 145 / 258 / 264 GB on Moore / Amdahl # +# / Gustafson, respectively. For example, if you allocated # +# 40 GB/core on Moore in the previous line, you can't # +# allocate more than 145 / 40 = 3 cores to your job. # +############################################################# + +#SBATCH -n 1 + +nCores=1 # same as the number of cores above + +############################################################# +# These modules should already been loaded in your session. # +# They are provided here in case someone wasn't loaded: # +############################################################# + +module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS + +############################################################# +# Set the name of your script to run; if no path is # +# provided, it is assumed to be in the directory where # +# the 'sbatch' command is executed: # +############################################################# + +diagnostic="./load_netcdf.R" + +Rscript $diagnostic diff --git a/slurm/old/parallel_almost_old.job b/slurm/old/parallel_almost_old.job new file mode 100644 index 0000000000000000000000000000000000000000..c446e01f97ec36cd58d62cdd4a9fc42f4a601358 --- /dev/null +++ b/slurm/old/parallel_almost_old.job @@ -0,0 +1,175 @@ +#!/bin/bash + +#SBATCH -J parallel +#SBATCH -o parallel-%J.out +#SBATCH -e parallel-%J.err + +############################################################# +# The line below specify to assign the job to one of the # +# nodes available (currently: moore, amdahl and gustafson) # +# The node should be chosen on the basis of the jobs of the # +# other user already running in the node. # +# if you don't have any preference, you can comment this # +# line, so Slurm will assign your jobs to one or more of # +# the nodes avaiable. # +# Note that if you jobs are assigned to different nodes, # +# and each job creates one or more output files in the # +# /scratch, they will end in the /scratch of different nodes# +# # +# You can check if a node is full with 'squeue' or with: # +# squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D # +# %.15R %.5C %.8m" # +# which also shows how much memory and cores a job uses # +############################################################# + +#SBATCH -w gustafson + +############################################################# +# Set the maximum computation time of the parallel job. # +# It has to be higher than the estimated computation # +# time of the parallel job (syntax: HH:MM:SS): # +# Time max is 96 hours (4 days), or 12 h for interactive # +# sessions. If there isn't any queue issue on the chosen # +# node, you can leave this value set to 96 hours. # +############################################################# + +#SBATCH -t 96:00:00 + +############################################################# +# Allocate the desired quantity of RAM (in MB) for each # +# processor (core). 
This value corresponds to the peak RAM # +# utilized by your jobs during their sequential execution: # +############################################################# + +#SBATCH --mem-per-cpu 65000 + +############################################################# +# Allocate a number of processors (cores) to this job. # +# it is also equal to the maximum number of jobs running at # +# the same time; the jobs exceeding this number will start # +# only when the previous jobs have finished computing. # +# The maximum number of cores that can be allocated depends # +# on the cluster: it is 8 cores for Moore, 12 for Amdahl # +# and 20 for Gustafson. Slurm will choose automatically to # +# which of the three clusters assign the job. However, this # +# number also cannot be higher than the total cluster's RAM # +# divided by the RAM allocated to one core (rounded down). # +# Total cluster RAM is 145 / 258 / 264 GB on Moore / Amdahl # +# / Gustafson, respectively. For example, if you allocated # +# 40 GB/core on Moore in the previous line, you can't # +# allocate more than 145 / 40 = 3 cores to your job. # +############################################################# + +#SBATCH -n 4 + +nCores=4 # same as the number of cores above + +############################################################# +# If your 'nCores' variable is limited by the total RAM, # +# and the peak memory use of each job is short compared to # +# the total computational time of a single job, you can # +# still increase 'nCores' by introducing a time delay # +# between jobs: each time you run a new job, the scheduler # +# will wait an amount of time determined by the user before # +# running the next job. In this way, the peak memory use of # +# the jobs don't overlap (i.e: don't happen at the same # +# time simulataneously), so more jobs can be executed in # +# parallel. However, have to disable the row above with # +# option. --mem-per-cpu commenting it. # +# Example: each of your jobs lasts ~ 50 min and needs an # +# average of 30 GB of RAM. However, for ~10 minutes, it # +# needs 60 GB of RAM. This memory peak usually should # +# halve 'nCores' value. However, setting 'delay' to 600 (s),# +# each job reaches its memory peak at a different time of # +# the other jobs, so you can double 'nCores', running in # +# this way more jobs in parallel. # +############################################################# + +delay=0 + +############################################################# +# These modules should already been loaded in your session. # +# They are provided here in case someone wasn't loaded: # +############################################################# + +module load R CDO NCO NETCDF gcc intel openmpi HDF5 UDUNITS + +############################################################# +# Set the name of your script to run; if no path is # +# provided, it is assumed to be in the directory where # +# the 'sbatch' command is executed: # +############################################################# + +diagnostic="../weather_regimes_v35.R" + +############################################################# +# First and last values of each of the three variables of # +# the parallel job. Values must be integers. Each value # +# between the first and last ones (including the extremes) # +# will be assigned to a different parallel job. If your job # +# has less than three variables, you can disable one or two # +# of them by setting both them to -999. 
# +# # +# Examples: # +# - If you want to run the same script 12 times changing # +# only the variable "month" from 1 to 12, set var1min = 1 # +# and var1max = 12, leaving the others to -999 # +# - If you also want to run the same script for a sequence # +# of years, i.e: for each month from 1980 to 2015, also # +# set var2min = 1950 and var2max = 2015 # +# # +# To link these variables to your script, just add the # +# following lines at the beginning of your script: # +# # +# var1 <- as.integer(commandArgs(TRUE)[1]) # +# var2 <- as.integer(commandArgs(TRUE)[2]) # +# var3 <- as.integer(commandArgs(TRUE)[3]) # +# # +# where var, var2, var3 are the variables that you want to # +# change in your script (i.e: 'month', 'year', etc.). If # +# use less than 3 variables, add less than three lines. # +# Note that the variables have to be integers! # +############################################################# + +var1min=12 +var1max=12 + +var2min=0 +var2max=6 + +var3min=-999 +var3max=-999 + +############################################################# +# the following lines run a process (thread) in background # +# for each different set of values of the 3 above variables # +# in group of jobs at the same time: # +############################################################# + +for arg1 in $(seq $var1min $var1max ); do + for arg2 in $(seq $var2min $var2max); do + for arg3 in $(seq $var3min $var3max); do + + if [ "$var1min" -eq -999 ] && [ "$var1max" -eq -999 ]; then arg1=""; fi + if [ "$var2min" -eq -999 ] && [ "$var2max" -eq -999 ]; then arg2=""; fi + if [ "$var3min" -eq -999 ] && [ "$var3max" -eq -999 ]; then arg3=""; fi + + Rscript $diagnostic $arg1 $arg2 $arg3 & + job_id_${arg1}_${arg2}_${arg3}=$! + sleep $delay + + nThreads=$(($nThreads+1)) + if [ "$nThreads" -ge $nCores ]; then + wait # wait until the first $nCores have finished before executing the next $nCores + nThreads=0 + fi + done + done +done + +############################################################ +# remove any eventual data loaded in the shared memory # +# /dev/shm/ before ending the job: # +############################################################ + +mpirun rm `ls /dev/shm -la | grep $(whoami) | awk ' { print "/dev/shm/" $9 } ' diff --git a/slurm/old/sbatch.job b/slurm/old/sbatch.job new file mode 100644 index 0000000000000000000000000000000000000000..67ed36fa19fbbf834d8c5eede963c583569b515f --- /dev/null +++ b/slurm/old/sbatch.job @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -n 1 +#SBATCH -o output_%J.out +#SBATCH -e output_%J.err +#SBATCH -J regimes +#SBATCH -t 10:00:00 +#SBATCH --mem-per-cpu 50000 + +Rscript weather_regimes_v34.R 12 0 + diff --git a/slurm/old/sbatch2.job b/slurm/old/sbatch2.job new file mode 100644 index 0000000000000000000000000000000000000000..bdd69f566ff31c033ea66fa731d96808d5d4db64 --- /dev/null +++ b/slurm/old/sbatch2.job @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -n 1 +#SBATCH -o output_%J.out +#SBATCH -e output_%J.err +#SBATCH -J regimes +#SBATCH -t 10:00:00 +#SBATCH --mem-per-cpu 50000 + +Rscript weather_regimes_v34.R 11 1 + diff --git a/slurm/old/sbatch3.job b/slurm/old/sbatch3.job new file mode 100644 index 0000000000000000000000000000000000000000..3607f7059cae164d5c1475f7cfee18399cda9e19 --- /dev/null +++ b/slurm/old/sbatch3.job @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -n 1 +#SBATCH -o output_%J.out +#SBATCH -e output_%J.err +#SBATCH -J regimes +#SBATCH -t 10:00:00 +#SBATCH --mem-per-cpu 50000 + +Rscript weather_regimes_v34.R 10 2 + diff --git a/slurm/old/sbatch4.job b/slurm/old/sbatch4.job new 
file mode 100644 index 0000000000000000000000000000000000000000..479eb4c9c4630a98999f65288233a3044fdf6bf4 --- /dev/null +++ b/slurm/old/sbatch4.job @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -n 1 +#SBATCH -o output_%J.out +#SBATCH -e output_%J.err +#SBATCH -J regimes +#SBATCH -t 10:00:00 +#SBATCH --mem-per-cpu 50000 + +Rscript weather_regimes_v34.R 9 3 + diff --git a/slurm/old/sbatch5.job b/slurm/old/sbatch5.job new file mode 100644 index 0000000000000000000000000000000000000000..c6a784feafd2ade2933e2ec7207f5542e9127d5e --- /dev/null +++ b/slurm/old/sbatch5.job @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -n 1 +#SBATCH -o output_%J.out +#SBATCH -e output_%J.err +#SBATCH -J regimes +#SBATCH -t 10:00:00 +#SBATCH --mem-per-cpu 50000 + +Rscript weather_regimes_v34.R 8 4 + diff --git a/slurm/old/sbatch6.job b/slurm/old/sbatch6.job new file mode 100644 index 0000000000000000000000000000000000000000..a123b67d5ede4536ecab8ffdd721119ec5b4a3da --- /dev/null +++ b/slurm/old/sbatch6.job @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -n 1 +#SBATCH -o output_%J.out +#SBATCH -e output_%J.err +#SBATCH -J regimes +#SBATCH -t 10:00:00 +#SBATCH --mem-per-cpu 50000 + +Rscript weather_regimes_v34.R 7 5 + diff --git a/slurm/old/sbatch7.job b/slurm/old/sbatch7.job new file mode 100644 index 0000000000000000000000000000000000000000..8f13041a49763767a847dd30302ed53a8e656a3f --- /dev/null +++ b/slurm/old/sbatch7.job @@ -0,0 +1,11 @@ +#!/bin/bash + +#SBATCH -n 1 +#SBATCH -o output_%J.out +#SBATCH -e output_%J.err +#SBATCH -J regimes +#SBATCH -t 10:00:00 +#SBATCH --mem-per-cpu 50000 + +Rscript weather_regimes_v34.R 6 6 + diff --git a/slurm/old/test_load.R b/slurm/old/test_load.R new file mode 100644 index 0000000000000000000000000000000000000000..2ee6262dd7564cf5c1305d38ff1e42aa47f9eb47 --- /dev/null +++ b/slurm/old/test_load.R @@ -0,0 +1,185 @@ +######################################################################################### +# Sub-seasonal Skill Scores # +######################################################################################### +# you can run it in a sequential way from the terminal of your workstation with the syntax: +# +# Rscript SkillScores_v7.R +# +# or in parallel on MareNostrum: +# +# bsub < SkillScores_v7.job +# + + +# i.e: to split the data in 8 chunks and run only the chunk number 3, write: +# +# Rscript SkillScores_v4.R 8 3 +# +# if you don't specify any argument, or you run it from the R interface, it runs all the chunks in a sequential way. +# Use this last approach if the computational speed is not a problem. +# + + + +########################################################################################## +# Load functions and libraries # +########################################################################################## + +#load libraries and functions: +library(s2dverification) +library(SpecsVerification) +#library(easyVerification) +#library(jpeg) +#library(abind) + +# Load function split.array: +source('/gpfs/projects/bsc32/bsc32842/scripts/Rfunctions.R') + +########################################################################################## +# User's settings # +########################################################################################## + +# working dir where to put the output maps and files in the /Data and /Maps subdirs: +workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" + +cfs.name <- 'ECMWF' # name of the climate forecast system (CFS) used for monthly predictions (to be used in map titles) +var.name <- 'sfcWind' # forecast variable. 
It is the name used inside the netCDF files and as a suffix in map filenames, i.e: 'sfcWind' or 'psl'
+var.name.map <- '10m Wind Speed' # forecast variable to verify (name used in the map titles), i.e: '10m Wind Speed' or 'SLP'
+
+yr1 <- 2014 # starting year of the weekly sequence of the forecasts
+mes <- 1 # starting forecast month (usually January)
+day <- 2 # starting forecast day
+
+yr1.hind <- 1994 #1994 # first hindcast year
+yr2.hind <- 2013 #2013 # last hindcast year (usually the forecast year -1)
+
+leadtime.week <- c('5-11','12-18','19-25','26-32') # which leadtimes are available in the weekly dataset
+n.members <- 4 # number of hindcast members
+
+my.score.name <- c('FairRpss','FairCrpss') #c('EnsCorr','FairRpss','FairCrpss','RelDiagr') # choose one or more skill scores to compute, among EnsCorr, FairRPSS, FairCRPSS and/or RelDiagr
+
+veri.month <- 2 #1:12 # select the month(s) for which you want to compute the chosen skill scores
+
+my.pvalue <- 0.05 # in case of computing the EnsCorr, set the p-value level to show in the ensemble mean correlation maps
+my.prob <- c(1/3,2/3) # quantiles used for FairRPSS and the RelDiagr (usually they are the terciles)
+conf.level <- c(0.05, 0.95) # percentiles employed for the calculation of the confidence level for the Rpss and Crpss (can be more than 2 if needed)
+int.lat <- NULL #1:160 # latitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+int.lon <- NULL #1:320 # longitude position of grid points selected for the Reliability Diagram (if you want to subset a region; if not, put NULL)
+
+boot <- TRUE # in case of computing the FairRpss and/or the FairCrpss, you can choose to do a bootstrap to evaluate the uncertainty of the skill scores
+n.boot <- 20 # number of resamples considered in the bootstrapping
+
+# coordinates of a chosen point in the world to plot the time series over the 52 maps (they must match values present in the objects 'la' and 'lo'):
+my.lat <- 55.3197120
+my.lon <- 0.5625
+
+###################################### Derived variables #############################################################################
+
+args <- commandArgs(TRUE) # put into variable args the list with all the optional arguments with which the script was run from the terminal
+chunk <- as.integer(args[1]) # number of the chunk to run in this script
+
+#source(paste0(workdir,'/ColorBarV.R')) # Color scale used for maps
+n.categ <- 1+length(my.prob) # number of forecasting categories (usually 3 if terciles are used)
+
+# blue-yellow-red colors:
+col <- as.character(read.csv("/gpfs/projects/bsc32//bsc32842/scripts/rgbhex.csv",header=F)[,1])
+
+sdates.seq <- weekly.seq(yr1,mes,day) # load the sequence of dates corresponding to all the Thursdays of the year
+n.leadtimes <- length(leadtime.week)
+n.yrs.hind <- yr2.hind-yr1.hind+1
+my.month.short <- substr(my.month,1,3)
+
+# Monthly start dates for the 2014 reforecasts (in the future you can modify it to work for a generic year):
+startdates.monthly <- list()
+startdates.monthly[[1]] <- 1:5
+startdates.monthly[[2]] <- 6:9
+startdates.monthly[[3]] <- 10:13
+startdates.monthly[[4]] <- 14:17
+startdates.monthly[[5]] <- 18:22
+startdates.monthly[[6]] <- 23:26
+startdates.monthly[[7]] <- 27:31
+startdates.monthly[[8]] <- 32:35
+startdates.monthly[[9]] <- 36:39
+startdates.monthly[[10]] <- 40:44
+startdates.monthly[[11]] <- 45:48
+startdates.monthly[[12]] <- 49:52
+
+## extract geographic coordinates; do this only ONCE for each prediction system, then save the lat/lon in a coordinates.RData file and comment
these rows to use function load() below: +#data.hindcast <- Load(var='sfcWind', exp = 'EnsEcmwfWeekHind', +# obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), +# nleadtime = 1, leadtimemin = 1, leadtimemax = 1, output = 'lonlat', configfile = file_path, method='distance-weighted' ) + +#lats<-data.hindcast$lat +#lons<-data.hindcast$lon +#save(lats,lons,file=paste0(workdir,'/coordinates.RData')) +#rm(data.hindcast);gc() + +# format geographic coordinates to use with PlotEquiMap: +load(paste0(workdir,'/coordinates.RData')) +n.lon <- length(lons) +n.lat <- length(lats) +n.lonr <- n.lon-ceiling(length(lons[lons<180 & lons > 0])) + +#la<-rev(lats) +#lo<-c(lons[c((n.lonr+1):n.lon)]-360,lons[1:n.lonr]) + +n.lat == 1 + +#ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc') +#var <- Load(var = 'z500', exp = NULL, obs = list(rean), paste0(2001,'0101'), storefreq = 'daily', leadtimemax = 365, output = 'lonlat') +#var <- Load(var = 'z500', exp = list(exp), obs = NULL, paste0(2000,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +for(month in veri.month){ + #month=1 # for the debug + my.startdates <- startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute + startdate.name <- my.month[month] # name given to the period of the selected startdates # i.e:"January" + n.startdates <- length(my.startdates) # number of startdates in the selected month + n.yrs <- length(my.startdates)*n.yrs.hind # number of total years for the selected month + + print(paste0("Computing Skill Scores for ", startdate.name)) + + # Merge all selected startdates: + hind.dim <- c(n.members, n.yrs.hind*n.startdates, n.leadtimes, n.lat, n.lon) # dimensions of the hindcast array + + #my.FairRpss <- my.FairCrpss <- my.EnsCorr <- my.PValue <- array(NA,c(n.leadtimes,n.lat,n.lon)) + #if(boot) my.FairRpssBoot <- my.FairCrpssBoot <- array(NA, c(n.boot, n.leadtimes, n.lat, n.lon)) + #if(boot) my.FairRpssConf <- my.FairCrpssConf <- array(NA, c(length(conf.level), n.leadtimes, n.lat, n.lon)) + + anom.hindcast.chunk <- anom.hindcast.chunk.sampled <- array(NA, c(n.members, n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + anom.rean.chunk <- anom.hindcast.mean.chunk <- anom.rean.chunk.sampled <- array(NA, c(n.startdates*n.yrs.hind, n.leadtimes, n.lat, n.lon)) + + time <- system.time({for(startdate in my.startdates){ + pos.startdate <- which(startdate == my.startdates) # count of the number of stardates already loaded+1 + my.time.interv <- (1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data + + # Load reanalysis data: + load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData')) +# anom.rean <- drop(anom.rean) +# anom.rean.chunk[my.time.interv,,,] <- anom.rean[,,1,] +# + # Load hindcast data: +# if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss" || any(my.score.name=="RelDiagr"))){ +# load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) +# anom.hindcast <- drop(anom.hindcast) +# anom.hindcast.chunk[,my.time.interv,,,] <- anom.hindcast[,,,1,] +# rm(anom.hindcast, anom.rean) +# gc() +# } +# + #if(any(my.score.name=="EnsCorr")){ + # load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_mean_startdate',startdate,'.RData')) # load ensemble mean hindcast data + # anom.hindcast.mean <- drop(anom.hindcast.mean) + # anom.hindcast.mean.chunk[my.time.interv,,,] <- 
anom.hindcast.mean[,,1,] + # rm(anom.hindcast.mean) + # gc() + #} + + } # close for on startdate + }) + + + #save(time, file=paste0(workdir,'/test_',startdate.name,'_chunk_',chunk,'_time_',time,'.RData')) + save(time, file=paste0(workdir,'/test_',startdate.name,'_chunk_',chunk,'_time_',time[3],'.RData')) + +} # close for on month + diff --git a/slurm/parallel.job b/slurm/parallel.job new file mode 100644 index 0000000000000000000000000000000000000000..2f1e06d6880db42a1892b3dfadb3130dc8d8391b --- /dev/null +++ b/slurm/parallel.job @@ -0,0 +1,283 @@ +#!/bin/bash + +############################################################# +# You can keep the name of your parallel job as it is below # +# ('parallel') or change it. In any case, only one standard # +# error and standard output files will be created, even if # +# you run hundreds of jobs, not to fill your directory with # +# unnecessary files. All the information you need on the # +# jobs is inside these two files, whose file names have the # +# job number appended as suffix. # +# # +# To increase productivity, create an alias en .bashrc # +# such as the one below to run this script from everywhere: # +# # +# alias p='sbatch ~/scripts/slurm/parallel.job' # +# # +# Remember to remove regularly any eventual data loaded in # +# the shared memory (/dev/shm/), because ths shared memory # +# is limited to a few GB (depending on the node), and when # +# it is full, no users can run jobs. You can create # +# an alias as the 'clean' alias below and execute it from # +# time to time. Run it also after a Load() function of # +# sd2verification terminates before finishing. # +# # +# clean='find /dev/shm/ -user $(whoami) -exec rm {} \;' # +# # +############################################################# + +#SBATCH -J parallel +#SBATCH -o parallel-%J.out +#SBATCH -e parallel-%J.err + +############################################################# +# The line below assigns your parallel jobs to the three # +# nodes of our cluster: # +# # +# *NODE* *CORES* *RAM (GB)* # +# moore 8 144 # +# amdahl 12 258 # +# gustafson 20 258 # +# # +# SLURM automatically distributes your jobs to one or more # +# nodes, depending on the available resources, if you want # +# to tun many sequential jobs at the same time it is better # +# to run them on the nodes with more resources available # +# (cores and memory); for example, if a node has its RAM # +# almost full you don't want to risk your jobs to be # +# cancelled by lack of RAM. Or, you want to save your # +# outputs not in the /esnas scratch but in the nodes's # +# /scratch, and you don't want to have them saved in the # +# /scratch of different nodes. # +# # +# In the past, option --mem-per-cpu allocated the desired # +# quantity of RAM for each processor (core). It was disabled# +# to allow more users to work at the same time, decreasing # +# queue times considerably. 
# +# # +# On the contrary, the option -m of interactive still works,# +# so you can take advantage of it to run an interactive # +# session in amdahl or gustafson, by specifying a higher # +# RAM amount than moore: # +# ~> sinteractive -m 200000 # +# # +# You can check if a node is full with 'squeue' or with: # +# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D # +# %.15R %.5C %.8m" # +# which also shows how much memory and cores a job is using # +############################################################# + +#SBATCH -w amdahl + +############################################################# +# Set the maximum computation time of the parallel job. # +# It has to be higher than the estimated computation # +# time of all parallel job (syntax: DD-HH:MM:SS): # +# You can leave this value as set below (1 week). If your # +# job take more than 1 week to finish, even after # +# splitting it in many jobs that runs simultaneously, than # +# it is better not to run it on our cluster but on the SMP # +# machine or on MareNostrum instead, because it takes too # +# much computational resources that other users cannot # +# employ while your jobs are running. # +############################################################# + +#SBATCH -t 01:00:00 + +############################################################# +# Allocate a number of processors (cores) to this job, i.e: # +# it sets the number of sequential jobs that run at the # +# same time on the chosen node. Each time a job finishes, a # +# new job is run, so there will always be this number of # +# jobs running (except at the end, when there are less jobs # +# left than this number). # +# # +# This value also cannot be higher than the node's free RAM # +# divided by the RAM allocated to one core (rounded down). # +# / Gustafson, respectively. For example, if you allocated # +# 40 GB/core on Moore in the previous line, you can't # +# allocate more than 145 / 40 = 3 cores to your job. # +# # +# The Load() function of s2dverification loads data in # +# parallel automatically. You have to disable this feature # +# adding option nprocs=1, or you'll use all the cores of the# +# node even if you reserved less cores. The downside is that# +# loading times of each job will increase. This is the main # +# disadvantage of running many sequential jobs at the same # +# time. # +# # +# Variable 'nCores' below must be set always equal to the # +# SAME VALUE of the number of cores introduced in the line # +# #SBATCH -n (it is redundant but this job file needs it) # +# # +############################################################# + +#SBATCH -n 1 +nCores=1 + +############################################################# +# if TRUE, run the job with NO external options, # +# and in serial mode. 
# +# You also have to set #SBATCH -n 1 manually. # +############################################################# + +serial=FALSE + +############################################################# +# Set the name of your script to run; if no path is # +# provided, it is assumed to be in the directory where # +# the 'sbatch' command is executed: # +############################################################# + +#script="/home/Earth/ncortesi/scripts/SkillScores_v11.R" +#script="/home/Earth/ncortesi/scripts/WT_v7.R" +#script="/home/Earth/ncortesi/scripts/WT_drivers_v8.R" +#script="/home/Earth/ncortesi/scripts/weather_regimes_maps_v29.R" +#script="/home/Earth/ncortesi/scripts/weather_regimes_v43.R" +#script="/home/Earth/ncortesi/scripts/weather_regimes_EDPR.R" +script="/shared/earth/Operational/EDPR/weather_regimes_EDPR.R" + + +############################################################# +# Be sure to include all the libraries your script needs: # +############################################################# + +module load R CDO NCO netCDF GCC HDF5 UDUNITS + +############################################################# +# First and last values of each of the three variables of # +# the parallel job. Values must be integers. Each value # +# between the first and last ones (including the extremes) # +# will be assigned to a different parallel job. If your job # +# has less than three variables, you can disable one or two # +# of them by leaving them empty in the lines below. # +# # +# IMPORTANT: All variables must be integers. The first # +# empty variable must be arg3, and the second must be arg2. # +# # +# Examples: # +# - If you want to run the same script 12 times, changing # +# only the variable "month" from 1 to 12, set arg1_min = 1 # +# and arg1_max = 12, leaving the other two blank: # +# arg1_min = 1 # +# arg1_max = 12 # +# arg2_min = # +# arg2_max = # +# arg3_min = # +# arg3_max = # +# # +# - If you also want to run the same script for a sequence # +# of years, e.g. for each month of every year from 1980 to # +# 2015, also set arg2_min = 1980 and arg2_max = 2015. # +# # +# To link these variables to your R script, just add the # +# following lines at the beginning of your script: # +# # +# script.arg <- as.integer(commandArgs(TRUE)) # +# if(length(script.arg) > 0){ # +# var1 <- script.arg[1] # +# var2 <- script.arg[2] # +# var3 <- script.arg[3] # +# } # +# # +# or add the lines below if you are using Python: # +# # +# import sys # +# script_arg = sys.argv # +# if len(script_arg) > 1: # +# var1 = int(script_arg[1]) # +# var2 = int(script_arg[2]) # +# var3 = int(script_arg[3]) # +# # +# where var1, var2 and var3 are the variables that are # +# used in the script and that you'd like to run in parallel # +# (e.g. 'month', 'year', 'leadtime', etc.). # +# # +# To optimize computation time, try to set 'nCores' to a # +# divisor of the total number of jobs, so the last loop # +# will not run a lonely job or only a few jobs. # +# For example: if you have to run 20 jobs, run them in # +# blocks of 4, 5 or 10 jobs simultaneously. # +# # +# If, after the jobs have finished, you need to re-run some # +# of them, just re-run this job file with arg1_min, arg1_max# +# and the other argX variables corresponding to the values # +# of the jobs you need to re-run. If the values are not # +# consecutive (e.g. 2, 5 and 11), set 'nCores'=1 and run # +# those jobs sequentially, executing this job file once # +# for each value you need to recompute.
# +############################################################# + +arg1_min=2017 +arg1_max=2017 + +arg2_min=2 +arg2_max=2 + +arg3_min= +arg3_max= + +############################################################# +# ! DO NOT MODIFY THE LINES BELOW ! # +############################################################# + +############################################################# +# the following lines run a process (thread) in background # +# for each different set of values of the 3 variables above,# +# in groups of jobs at the same time: # +############################################################# + +bash_pid=$$ # get the pid of the parent job (use $! instead to get the pid of the last child process) +nChildren=0 # initial number of children processes (background jobs executed by this script) + +# if the user left arg2_min or arg3_min empty, fill them with -999 instead, +# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3: +size2=${#arg2_min} # length of arg2_min +size3=${#arg3_min} # length of arg3_min +if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi +if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi + +if [ $serial == 'FALSE' ]; then + +# For loops over the three variables to cycle through all their values: +for arg1 in $(seq $arg1_min $arg1_max ); do + for arg2 in $(seq $arg2_min $arg2_max); do + for arg3 in $(seq $arg3_min $arg3_max); do + # disable 'arg2' and/or 'arg3' if the user left them empty: + if [ $size2 -eq 0 ]; then arg2=""; fi + if [ $size3 -eq 0 ]; then arg3=""; fi + + # detect the language of your script and run a new child job in background: + if [ ${script: -1} == 'R' ]; then + Rscript $script $arg1 $arg2 $arg3 & + elif [ ${script: -2} == 'py' ]; then + python $script $arg1 $arg2 $arg3 & + else + $script $arg1 $arg2 $arg3 & + fi + + nChildren=$(($nChildren+1)) # increase the number of children processes + + # if enough jobs are already running, wait until at least one has finished before launching the next one: + while [ $nChildren -ge $nCores ] + do + children=`ps -eo ppid | grep -w $bash_pid` # get the pids of its children + parent + nChildren=`echo $children | wc -w` # total number of children + parent + let nChildren=nChildren-1 # total number of children + sleep 1 # pause until at least one job has finished + done + done + done +done + + +else # run the job in a sequential way: + + if [ ${script: -1} == 'R' ]; then + Rscript $script + elif [ ${script: -2} == 'py' ]; then + python $script + else + $script + fi +fi diff --git a/slurm/parallel.sh b/slurm/parallel.sh new file mode 100755 index 0000000000000000000000000000000000000000..9f82dc109ebf67d22c897614dc041db00cc05684 --- /dev/null +++ b/slurm/parallel.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +############################################################# +# Set the name of your script to run; if no path is # +# provided, it is assumed to be in the directory where # +# the 'sbatch' command is executed: # +############################################################# + +script="/home/Earth/ncortesi/scripts/test.R" + +sbatch /home/Earth/ncortesi/scripts/slurm/parallel.job $script
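A note on the throttling loop in parallel.job: counting children with `ps -eo ppid | grep -w $bash_pid` works, but it polls once per second and silently assumes no unrelated process shares the parent pid. On a node whose bash is 4.3 or newer, the built-in `wait -n` blocks until any single background job exits, so no polling is needed. A minimal sketch of that variant, assuming the same `script`, `nCores` and argument range already defined in the job file (shown for one loop variable only):

running=0
for arg1 in $(seq $arg1_min $arg1_max); do
    Rscript "$script" "$arg1" &      # launch one child job in background
    running=$((running+1))
    if [ "$running" -ge "$nCores" ]; then
        wait -n                      # block until any one child finishes
        running=$((running-1))
    fi
done
wait                                 # let the remaining children finish before the job exits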
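parallel.sh appends $script to the sbatch call, and sbatch forwards trailing arguments to the batch script as positional parameters; however, parallel.job as committed above hardcodes its own `script` variable and never reads $1, so the value passed by the wrapper is ignored. If you want the wrapper to actually select the script, the job file could read its first argument and fall back to the hardcoded path, as in this sketch (a suggested change, not part of the committed files):

# near the top of parallel.job, replacing the fixed assignment:
script_default="/shared/earth/Operational/EDPR/weather_regimes_EDPR.R"  # fallback when no argument is given
script="${1:-$script_default}"   # $1 is the path appended by parallel.sh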
diff --git a/slurm/parallel_chunk.job b/slurm/parallel_chunk.job new file mode 100644 index 0000000000000000000000000000000000000000..c85be7746f4ca83bb28b1008d64cd2458289f81c --- /dev/null +++ b/slurm/parallel_chunk.job @@ -0,0 +1,33 @@ +#!/bin/bash +#SBATCH -n 1 +#SBATCH -J diagnostic +#SBATCH -o diagnostic.out +#SBATCH -e diagnostic.err + +# set the maximum execution time of the diagnostic: +#SBATCH -t 12:00:00 + +# Set the name of your script to run: +diagnostic="weather_regimes_v35.R" #"diagnostic_cluster.R" + +# Set the first and last chunk to employ: +firstChunk=7 +lastChunk=12 + +# Set the maximum number of simultaneous jobs: +nCores=4 + +# run a process (thread) for each chunk in background: +nThreads=0 +for ARG in $(seq $firstChunk $lastChunk); do + Rscript $diagnostic $ARG & + nThreads=$(($nThreads+1)) + if [ "$nThreads" -ge $nCores ]; then + wait # wait until the first $nCores chunks have finished before executing the next $nCores + nThreads=0 + fi +done + +# wait until all chunks have been computed; then, in case we are splitting an array into chunks, +# collect the results of each chunk and merge them in the file 'diagnostic_output.RData': +#wait +#Rscript -e 'firstChunk <- as.integer(commandArgs(TRUE)[1])' -e 'lastChunk <- as.integer(commandArgs(TRUE)[2])' -e 'for(cnk in firstChunk:lastChunk){' -e 'load(paste0(getwd(),"/output_",cnk,".RData"))' -e 'if(cnk==1) output <- array(NA, c(lastChunk-firstChunk+1, length(var)))' -e 'output[cnk,] <- var' -e 'file.remove(paste0(getwd(),"/output_",cnk,".RData"))}' -e 'save(var, file="diagnostic_output.RData")' $firstChunk $lastChunk
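The commented-out merge step above is hard to follow as a chain of -e expressions, and as written it has two bugs: it saves `var` (the last chunk loaded) instead of the merged `output` array, and it only initializes and indexes `output` correctly when firstChunk is 1. A more readable sketch of the same step, assuming each chunk saved a vector `var` to output_<chunk>.RData in the working directory (the hypothetical layout the one-liner implies):

wait    # make sure all background chunks have finished
R --slave --vanilla --args $firstChunk $lastChunk <<'EOF'
args <- as.integer(commandArgs(TRUE))
firstChunk <- args[1]; lastChunk <- args[2]
output <- NULL
for (cnk in firstChunk:lastChunk) {
  load(paste0(getwd(), "/output_", cnk, ".RData"))     # loads 'var'
  if (is.null(output)) output <- array(NA, c(lastChunk - firstChunk + 1, length(var)))
  output[cnk - firstChunk + 1, ] <- var
  file.remove(paste0(getwd(), "/output_", cnk, ".RData"))
}
save(output, file = "diagnostic_output.RData")
EOF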
# +# # +# On the contrary, the option -m of interactive still works,# +# so you can take advantage of it to run an interactive # +# session in amdahl or gustafson, by specifying a higher # +# RAM amount than moore: # +# ~> sinteractive -m 200000 # +# # +# You can check if a node is full with 'squeue' or with: # +# ~> squeue -o "%.18i %.9P %.8j %.8u %.8T %.12M %.12l %.6D # +# %.15R %.5C %.8m" # +# which also shows how much memory and cores a job is using # +############################################################# + +#SBATCH -w gustafson + +############################################################# +# Set the maximum computation time of the parallel job. # +# It has to be higher than the estimated computation # +# time of all parallel job (syntax: DD-HH:MM:SS): # +# You can leave this value as set below (3 days). If your # +# job take more than 3 days to finish, even after # +# splitting it in many jobs that runs simultaneously, than # +# it is better not to run it on our cluster but on the SMP # +# machine or on MareNostrum instead, because it takes too # +# much computational resources that other users cannot # +# employ while your jobs are running. # +############################################################# + +#SBATCH -t 3-00:00:00 + +############################################################# +# Allocate a number of processors (cores) to this job, i.e: # +# it sets the number of sequential jobs that run at the # +# same time on the chosen node. Each time a job finishes, a # +# new job is run, so there will always be this number of # +# jobs running (except at the end, when there are less jobs # +# left than this number). # +# # +# number also cannot be higher than the node's free RAM # +# divided by the RAM allocated to one core (rounded down). # +# / Gustafson, respectively. For example, if you allocated # +# 40 GB/core on Moore in the previous line, you can't # +# allocate more than 145 / 40 = 3 cores to your job. # +# # +# The Load() function of s2dverification loads data in # +# parallel automatically. You have to disable this feature # +# adding option nprocs=1, or you'll use all the cores of the# +# node even if you reserved less cores. The downside is that# +# loading times of each job will increase. # +# # +############################################################# + +#SBATCH -n 4 + +# variable 'nCores' must always be the same as the maximum +# number of cores introduced in the above line: + +nCores=4 + +############################################################# +# Set the name of your script to run; if no path is # +# provided, it is assumed to be in the directory where # +# the 'sbatch' command is executed: # +############################################################# + +script="/home/Earth/fullano/my.script.R" + +############################################################# +# Be sure to include all the libraries your script needs: # +############################################################# + +module load R CDO NCO netCDF GCC HDF5 UDUNITS + +############################################################# +# First and last values of each of the three variables of # +# the parallel job. Values must be integers. Each value # +# between the first and last ones (including the extremes) # +# will be assigned to a different parallel job. If your job # +# has less than three variables, you can disable one or two # +# of them by leaving them empty in the lines below. # +# # +# IMPORTANT: All variables must be integers. 
+
+#############################################################
+# Set the name of your script to run; if no path is         #
+# provided, it is assumed to be in the directory where      #
+# the 'sbatch' command is executed:                         #
+#############################################################
+
+script="/home/Earth/fullano/my.script.R"
+
+#############################################################
+# Be sure to include all the libraries your script needs:   #
+#############################################################
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+#############################################################
+# First and last values of each of the three variables of   #
+# the parallel job. Values must be integers. Each value     #
+# between the first and last ones (extremes included) will  #
+# be assigned to a different parallel job. If your job has  #
+# fewer than three variables, you can disable one or two of #
+# them by leaving them empty in the lines below.            #
+#                                                           #
+# IMPORTANT: all variables must be integers. The first      #
+# empty variable must be arg3, and the second arg2.         #
+#                                                           #
+# Examples:                                                 #
+# - If you want to run the same script 12 times, changing   #
+#   only the variable "month" from 1 to 12, set             #
+#   arg1_min = 1 and arg1_max = 12, leaving the other two   #
+#   blank:                                                  #
+#   arg1_min = 1                                            #
+#   arg1_max = 12                                           #
+#   arg2_min =                                              #
+#   arg2_max =                                              #
+#   arg3_min =                                              #
+#   arg3_max =                                              #
+#                                                           #
+# - If you also want to run the same script for a sequence  #
+#   of years, e.g. for each month of every year from 1980   #
+#   to 2015, also set arg2_min = 1980 and arg2_max = 2015.  #
+#                                                           #
+# To link these variables to your R script, just add the    #
+# following lines at the beginning of your script:          #
+#                                                           #
+# script.arg <- as.integer(commandArgs(TRUE))               #
+# if(length(script.arg) > 0){                               #
+#   <var1> <- script.arg[1]                                 #
+#   <var2> <- script.arg[2]                                 #
+#   <var3> <- script.arg[3]                                 #
+# }                                                         #
+#                                                           #
+# or add the lines below if you are using Python:           #
+#                                                           #
+# import sys                                                #
+# script_arg = sys.argv                                     #
+# if len(script_arg) > 1 :                                  #
+#     <var1> = int(script_arg[1])                           #
+#     <var2> = int(script_arg[2])                           #
+#     <var3> = int(script_arg[3])                           #
+#                                                           #
+# where <var1>, <var2> and <var3> are the variables that    #
+# are used in the script and that you'd like to run in      #
+# parallel (e.g. 'month', 'year', 'leadtime', etc.).        #
+#                                                           #
+# To optimize computation time, try to set 'nCores' to a    #
+# divisor of the total number of jobs, so the last loop     #
+# will not run only one or a few jobs. For example, if you  #
+# have to run 20 jobs, run them in blocks of 4, 5 or 10     #
+# jobs simultaneously.                                      #
+#                                                           #
+# If, after the jobs have finished, you need to re-run some #
+# of them, just re-run this job file with arg1_min,         #
+# arg1_max and the other argX variables set to the values   #
+# of the jobs you need to re-run. If the values are not     #
+# consecutive (e.g. 2, 5 and 11), set 'nCores'=1 and run    #
+# those jobs sequentially, executing this job file once for #
+# each different value you need to recompute.               #
+#############################################################
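+
+# For reference, a minimal my.script.R consuming the two active arguments
+# below (an illustrative sketch; the variable names are placeholders):
+#
+# script.arg <- as.integer(commandArgs(TRUE))
+# if(length(script.arg) > 0){
+#   month <- script.arg[1]   # arg1 takes values 1..12
+#   year  <- script.arg[2]   # arg2 takes values 1..5
+# }
+# cat("processing month", month, "and year index", year, "\n")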
+
+arg1_min=1
+arg1_max=12
+
+arg2_min=1
+arg2_max=5
+
+arg3_min=
+arg3_max=
+
+#############################################################
+# In the majority of cases, you can simply ignore the       #
+# variable 'delay' introduced below and leave it at 0. Only #
+# if your 'nCores' value is limited by the total memory,    #
+# and the peak memory use of each job is short compared to  #
+# the total computation time of a single job, can you still #
+# increase 'nCores' by introducing a time delay between     #
+# jobs: each time a new job is run, the scheduler waits an  #
+# amount of time chosen by the user before running the next #
+# job. In this way the memory peaks of the jobs don't       #
+# overlap (i.e. don't happen at the same time), so more     #
+# jobs can be executed in parallel.                         #
+# Example: each of your jobs lasts ~50 min and needs an     #
+# average of 30 GB of RAM; however, for ~5 minutes each job #
+# needs 60 GB of RAM. This memory peak has the effect of    #
+# halving the 'nCores' value. By setting 'delay' to 600 (s),#
+# each job reaches its memory peak at a different time from #
+# the other jobs, letting you double the 'nCores' value.    #
+#############################################################
+
+delay=0
+
+#############################################################
+# The following lines run a process in the background for   #
+# each different combination of values of the 3 variables   #
+# above, in groups of 'nCores' jobs at a time:              #
+#############################################################
+
+bash_pid=$$   # get the pid of the parent job (use $! instead to get the pid of the last child process)
+nChildren=0   # initial number of child processes (background jobs executed by this script)
+
+# if the user left arg2_min or arg3_min empty, fill them with -999 instead,
+# to be able to run the next for loop with only 1 'fake' value (-999) for arg2 and/or arg3:
+size2=${#arg2_min}   # length of arg2_min
+size3=${#arg3_min}   # length of arg3_min
+if [ $size2 -eq 0 ] ; then arg2_min=-999; arg2_max=-999; fi
+if [ $size3 -eq 0 ] ; then arg3_min=-999; arg3_max=-999; fi
+
+# for loops over the three variables to cycle through all their values:
+for arg1 in $(seq $arg1_min $arg1_max); do
+  for arg2 in $(seq $arg2_min $arg2_max); do
+    for arg3 in $(seq $arg3_min $arg3_max); do
+      # disable 'arg2' and/or 'arg3' if the user left them empty:
+      if [ $size2 -eq 0 ]; then arg2=""; fi
+      if [ $size3 -eq 0 ]; then arg3=""; fi
+
+      # detect the language of your script and run a new child job in the background:
+      if [ ${script: -1} == 'R' ]; then
+        Rscript $script $arg1 $arg2 $arg3 &
+      elif [ ${script: -2} == 'py' ]; then
+        python $script $arg1 $arg2 $arg3 &
+      else
+        $script $arg1 $arg2 $arg3 &
+      fi
+
+      nChildren=$(($nChildren+1))   # increase the number of child processes
+      sleep $delay                  # wait, if the user wants to start the next job after some time
+
+      # if enough jobs are already running, wait until at least one has finished before launching the next one:
+      while [ $nChildren -ge $nCores ]
+      do
+        children=`ps -eo ppid | grep -w $bash_pid`   # get the pids of the children + parent
+        nChildren=`echo $children | wc -w`           # total number of children + parent
+        let nChildren=nChildren-1                    # total number of children
+        sleep 1                                      # pause until at least one job has finished
+      done
+    done
+  done
+done
diff --git a/slurm/parallel_test.job b/slurm/parallel_test.job
new file mode 100644
index 0000000000000000000000000000000000000000..87b97cef6f73ff9648b77c547dbcb892c50022ea
--- /dev/null
+++ b/slurm/parallel_test.job
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+#SBATCH -w amdahl
+#SBATCH -t 3-00:00:00
+#SBATCH -n 4
+
+script="/home/Earth/ncortesi/scripts/test.R"
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+# launch four test jobs in the background, passing the job index as argument:
+for i in `seq 4`; do
+  Rscript $script $i &
+done
+
+wait   # don't let the batch script exit before the background jobs finish
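+
+# For reference, a minimal test.R matching the call above (a hypothetical
+# sketch; the real test.R is not included in this diff):
+#
+# i <- as.integer(commandArgs(TRUE))[1]
+# cat("test job", i, "running on", Sys.info()[["nodename"]], "\n")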
diff --git a/slurm/quick.sh b/slurm/quick.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0c72753839b6bcd9665f0f6049ad0815f43c2216
--- /dev/null
+++ b/slurm/quick.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# from the terminal, execute this job script with:
+#
+# sbatch -n 1 -t 48:00:00 -w amdahl ~/scripts/slurm/quick.sh
+#
+
+#SBATCH -J quick
+#SBATCH -o quick-%J.out
+#SBATCH -e quick-%J.err
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+# insert here your R script to run:
+script="/home/Earth/ncortesi/scripts/weather_regimes_v38.R"
+
+Rscript $script
diff --git a/slurm/tirar.sh b/slurm/tirar.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c03260a8153d2622dad6aa1311d427dc1f4e705b
--- /dev/null
+++ b/slurm/tirar.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+#SBATCH -J parallel
+#SBATCH -o parallel-%J.out
+#SBATCH -e parallel-%J.err
+#SBATCH -t 7-00:00:00
+
+#SBATCH -w amdahl
+#SBATCH -n 5
+
+module load R CDO NCO netCDF GCC HDF5 UDUNITS
+
+script="/home/Earth/fullano/.../diagnostic.R"
+
+arg1_min=1
+arg1_max=12
+
+arg2_min=1
+arg2_max=5
+
+arg3_min=
+arg3_max=
+
+# The snippets below are not executed: they illustrate, in R, how the
+# script called above should read the two active arguments, replacing
+# its own internal loops:
+#
+#   start_date = 1:12
+#   lead_time = 1:5
+#
+#   ...
+#
+#   script.arg <- as.integer(commandArgs(TRUE))
+#
+#   if(length(script.arg) > 0){
+#     start_date <- script.arg[1]
+#     lead_time <- script.arg[2]
+#   }
+#
+#   for (sd in start_date){
+#     for (lt in lead_time){
+#       ######################
+#       # Your analysis here #
+#       ######################
+#     }
+#   }
+#
+# and how the outputs can be saved, either all together:
+#
+#   save(output_1, ... , output_N, file=paste0(workdir,"/my_analysis.RData"))
+#
+# or one file per start date / lead time combination:
+#
+#   save(output_1, ... , output_N, file=paste0(workdir,
+#        "/my_analysis_start_date_",sd,"_lead_time_",lt,".RData"))
+#
+# The Python equivalent of the argument-reading block is:
+#
+#   import sys
+#   script_arg = sys.argv
+#
+#   if len(script_arg) > 1 :
+#       start_date = int(script_arg[1])
+#       lead_time = int(script_arg[2])
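+
+# NOTE: this template defines the argument ranges but, unlike
+# parallel_old3.job, it never launches the jobs. A minimal launching loop
+# in the same spirit (a sketch, assuming only arg1 and arg2 are active):
+#
+# for arg1 in $(seq $arg1_min $arg1_max); do
+#   for arg2 in $(seq $arg2_min $arg2_max); do
+#     Rscript $script $arg1 $arg2 &
+#   done
+# done
+# wait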
diff --git a/toyModel.R b/toyModel.R
new file mode 100644
index 0000000000000000000000000000000000000000..19c09ba056a33d95997a9c933b3f65f7ddd73c68
--- /dev/null
+++ b/toyModel.R
@@ -0,0 +1,573 @@
+
+##################################################################################
+#                                   Toy Model                                    #
+##################################################################################
+
+library(s2dverification)
+library(SpecsVerification)
+library(easyVerification)
+
+my.prob=c(1/3,2/3) # quantiles used for the FairRpss and the RelDiagr (usually the terciles)
+
+N=100 # number of years
+M=15  # number of members
+
+for(M in c(2:10,20,51,100,200)){
+
+for(N in c(5,10,15,20,25,30,40,50,100,200,500,1000,2000,5000,10000,20000,50000,100000)){
+
+anom.rean<-rnorm(N)
+anom.hind<-array(rnorm(N*M),c(N,M))
+#quantile(anom.hind,prob=my.prob,type=8)
+
+obs<-convert2prob(anom.rean,prob=my.prob)
+#mean(obs[,1]) # check if it is equal to 0.333 for K=3
+
+ens<-convert2prob(anom.hind,prob=my.prob)
+
+# climatological forecast:
+anom.hind.clim<-array(sample(anom.hind,N*M),c(N,M)) # extract a random sample without replacement of the hindcast (i.e. a permutation of all its values)
+ens.clim<-convert2prob(anom.hind.clim,prob=my.prob)
+#mean(ens.clim[,1]) # check if it is equal to M/3 (1.333 for M=4)
+
+# alternative definition of the climatological forecast, based on the observations instead
+# (as applied by veriApply via convert2prob when the option fcst.ref is missing):
+#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with leave-one-out cross-validation
+anom.rean.clim<-t(array(anom.rean,c(N,N))) # what convert2prob uses when ref is not specified; it is ~ equal to the ClimEns function above, it only has one more column
+ens.clim.rean<-convert2prob(anom.rean.clim,prob=c(1/3,2/3)) # create a new prob. table based on the climatological ensemble of observations
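+
+# What convert2prob() returns here (for reference): an N x 3 matrix of
+# tercile counts. For the observations each row is one-hot, marking the
+# tercile the value fell into; for an M-member ensemble each row sums to M,
+# e.g. with M=4 a row c(1,2,1) means 1 member below the lower tercile,
+# 2 between the terciles and 1 above the upper one.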
+
+p<-count2prob(ens) # we should add the option type=4; if not, the probabilities of each year don't sum to 100%!!!
+y<-count2prob(obs) # we should add the option type=4; if not, the probabilities of each year don't sum to 100%!!!
+ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85), plot.refin=F)
+
+### computation of verification scores:
+
+Rps<-round(mean(EnsRps(ens,obs)),4)
+FairRps<-round(mean(FairRps(ens,obs)),4)
+#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it has the same value as Rps
+#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it has the same value as FairRps
+
+Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4)
+FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4)
+FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4)
+
+#FairRps.clim.stable<-0.44444444
+#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it has the same value as Rps.clim
+#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim
+
+Rpss<-round(1-(Rps/Rps.clim),4)
+Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes)
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4) # using their climatological forecast we get a value different from Rpss.specs
+#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # comes out at ~0.45 for every hindcast; it differs both from Rps.clim and from Rps.clim for N -> +oo (0.555)
+
+Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim. forecast; = to Rpss.specs
+Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # without providing the clim. forecast
+
+FairRpss<-round(1-(FairRps/FairRps.clim),4)
+#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes)
+#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps
+#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast
+#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # comes out at ~0.55; it differs both from FairRps.clim and from FairRps.clim for N -> +oo (0.444)
+
+cat(paste0('n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' Rps: ',Rps,' Rps.clim: ',Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,'\n'))
+
+#cat(paste0('n.memb: ',M,' n.yrs: ',N,' Rps: ',Rps,' Rps.clim: ',Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ',M,' n.yrs: ',N,' FairRps: ',FairRps,' FairRps.clim: ',FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim))
+
+}
+
+# Crpss:
+
+#Crps<-round(mean(EnsCrps(ens,obs)),4)
+#FairCrps<-round(mean(FairCrps(ens,obs)),4)
+
+#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4))
+#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4)
+
+#Crpss<-round(1-(Crps/Crps.clim),4)
+#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4)
+
+#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4)
+#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4)
+
+}
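+
+# Quick numeric illustration of the skill-score relation used above (made-up
+# numbers, not output of this script): if Rps = 0.40 and Rps.clim = 0.50,
+# then Rpss = 1 - 0.40/0.50 = 0.20, i.e. the forecast improves on the
+# climatological reference by 20%; Rpss <= 0 means no skill over climatology.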
+
+
+##################################################################################
+#                                 Other analysis                                 #
+##################################################################################
+
+my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together
+
+#tm<-toyarray(dims=c(32,64),N=20,nens=4) # N is the number of startdates in case of forecasts, or the number of years in case of hindcasts
+#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20]
+#
+#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs)
+#f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
+#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs)
+#str(f.crps) # [32,64,20]
+
+#fcst <- array(rnorm(prod(4:8)), 4:8)
+#obs <- array(rnorm(prod(4:7)), 4:7)
+#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs)
+#dim(f.me)
+#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm, not only to swap dimensions but also to merge two dimensions into a single one!
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6))
+#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1)
+#dim(f.me2)
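+
+# Tiny illustration of the aperm trick above (for reference): merge the
+# first and third dimensions of a [2,3,4] array into a single one of
+# length 8, keeping the second dimension separate:
+# a <- array(1:24, c(2,3,4))
+# b <- array(aperm(a, c(1,3,2)), c(2*4, 3)) # aperm gives [2,4,3]; refilling column-major fuses dims 1 and 3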
+
+# when you have, for each leadtime, all the 52 weeks of year 2014 stored in a map,
+# you can plot the time series for a particular point and leadtime:
+pos.lat<-which(round(la,3)==round(my.lat,3))
+pos.lon<-which(round(lo,3)==round(my.lon,3))
+
+x11()
+plot(1:week,EnMeanCorr[,leadtime],type="b",
+     main=paste0("Weekly time series of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)),
+     xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1))
+
+
+
+### check if 4 time steps are better than one:
+
+yrs.hind<-yr2.hind-yr1.hind+1
+my.point.obs<-array(NA,c(yrs.hind,28))
+
+# load obs. data for the four time steps:
+for(year in yr1.hind:yr2.hind){
+  data_erai_6h <- Load(var=var.name, exp = 'ERAint6h',
+                       obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61,
+                       leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633]
+}
+
+utm0<-c(1,5,9,13,17,21,25)
+utm6<-utm0+1
+utm12<-utm0+2
+utm18<-utm0+3
+my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0])
+my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6])
+my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12])
+my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18])
+my.points.obs.weekly.mean.all<-rowMeans(my.point.obs)
+
+cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs")
+
+my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18)
+my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18)
+cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs")
+
+
+##### Calculate the reanalysis climatology loading only 1 file in every 4 (it's quick, but it needs all files beforehand):
+
+ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate in every 4, plus the last 3 startdates, which must be computed individually because they are not a complete set of 4
+
+for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence, along with the same startdates of the previous 20 years
+  data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek',
+                    obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)),
+                    nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted')
+
+  clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel
+
+  # the intermediate startdates between startdate week i and startdate week i+4 are repeated:
+  if(startdate <= 49){
+    clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] #  startdate   leadtime (week)
+    clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] #   (week)     1  2  3  4
+    clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] #     1        a  b  c  d  <- only startdates 1 and 5 are necessary to calculate all startdates between 1 and 5
+    clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] #     2        b  c  d  e
+    clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] #     3        c  d  e  f
+    clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] #     4        d  e  f  g
+  }                                                       #     5        e  f  g  h
+
+  if(startdate > 1 && startdate <=49){ # fill the previous startdates:
+    clim.ERAI[startdate-1,4,,]<- clim.ERAI[startdate,3,,]
+    clim.ERAI[startdate-1,3,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-2,4,,]<- clim.ERAI[startdate,2,,]
+    clim.ERAI[startdate-1,2,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-2,3,,]<- clim.ERAI[startdate,1,,]
+    clim.ERAI[startdate-3,4,,]<- clim.ERAI[startdate,1,,]
+  }
+
+}
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
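+
+# One-off sanity check for the fast path (a sketch, mirroring the r/r2
+# comparison above): the internal call should agree with plain cor():
+# x <- rnorm(50); y <- rnorm(50)
+# all.equal(cor(x, y), .Internal(cor(x, y, 4L, FALSE)))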
+
+
+#######################################################################################
+#                                 Raster Animations                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component, remove it...
+  # it might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this instead, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  cat(i,"\r")
+}
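+
+# Base R also ships a ready-made progress bar in the utils package, which
+# avoids the cat()/message() tricks above:
+# pb <- txtProgressBar(min = 0, max = 10, style = 3)
+# for(i in 1:10) { Sys.sleep(0.2); setTxtProgressBar(pb, i) }
+# close(pb)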
+
+
+#######################################################################################
+#                            Load large datasets with ff                              #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind,file=my.scratch)
+close(anom.hind);rm(anom.hind)
+
+ffload(file=my.scratch, list=c("anom.hind"))
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind);rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+runVeriApplyBig <- function(my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list=c("anom.hind"))
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+close(anom.hind); rm(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim) # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1]) # doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,2/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff") # get() works well in this case, but not in the row below! (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
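+
+# Minimal ff round-trip distilled from the snippets above (a sketch; the
+# paths are illustrative): create an ff array on disk, save it, free the
+# session, then load it back later:
+# library(ff)
+# x <- as.ff(array(rnorm(8), c(2,4)), vmode="double", filename="/tmp/x.ff")
+# ffsave(x, file="/tmp/xsave")  # writes /tmp/xsave.ffData and /tmp/xsave.RData
+# close(x); rm(x)
+# ffload(file="/tmp/xsave")     # restores 'x' in the session
+# open(x); x[,1]; close(x)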
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  month=1 # for debug
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month) #startdates.monthly[[month]] #c(1:5) # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month] # name given to the period of the selected startdates, i.e. "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon)) # the reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod<-0 # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded + 1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData')) # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData")) # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
+        yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+        if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled"))
+        if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon))
+
+        if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData")) # need to reload the hindcasts if b>1
+
+        for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,] # with ff it takes 53s x 100 ~ 1h!!! (without, only 4s x 100 ~ 6m)
+
+        if(!bigdata) rm(anom.hind.big); gc() # free memory for the following commands
+
+        # faster way that will be able to replace the above one when the ffapply output parameters are improved:
+        #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE) # takes only 3m
+        #aa<-abind(a[1:10], along=4) # it doesn't fit into memory!
+
+        if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                 obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                 prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$rpss
+
+        if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                   obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                   tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus) #$crpss
+
+        if(bigdata) delete(anom.hind.big.sampled)
+        rm(anom.hind.big.sampled) # delete the sampled hindcast
+
+      }
+
+      # calculate the 5th and 95th percentiles of the skill scores:
+      if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+      if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- apply(my.FairCrpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+
+    } # close if on boot
+
+  }
+
+  if(bigdata) delete(anom.hind.big) # delete the file with the hindcasts
+  rm(anom.hind.big)
+  rm(anom.rean.big)
+  gc()
+
+  if(!boot){
+    if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  } else {
+    if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+  if(exists("my.FairRpss")) rm(my.FairRpss); if(exists("my.FairCrpss")) rm(my.FairCrpss)
+  if(exists("my.FairRpssBoot")) rm(my.FairRpssBoot); if(exists("my.FairCrpssBoot")) rm(my.FairCrpssBoot)
+  if(exists("my.FairRpssConf")) rm(my.FairRpssConf); if(exists("my.FairCrpssConf")) rm(my.FairCrpssConf)
+
+} # next m (month)
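+
+# The resampling logic above in miniature (a sketch with toy data): bootstrap
+# a score series by drawing years with replacement, then take the 5%/95%
+# quantiles of the resampled means as the confidence band:
+# score <- rnorm(30, mean=0.2, sd=0.1)                # one score per year
+# boot.means <- replicate(1000, mean(sample(score, replace=TRUE)))
+# quantile(boot.means, probs=c(0.05,0.95), type=8)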
+
+
+#########################################################################################
+#       Calculate and save the FairRpss for a chosen period using ff and Load()         #
+#########################################################################################
+
+anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+for(startdate in my.startdates){
+  pos.startdate<-which(startdate==my.startdates) # count of the number of startdates already loaded + 1
+  my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind) # time interval where to load data
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  #anom.rean<-drop(anom.rean)
+  #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+  my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+  anom.hindcast <- drop(Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                             obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                             output = 'lonlat', configfile = file_path, method='distance-weighted')$mod)
+
+  anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+  rm(anom.hindcast)
+}
+
+my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                         Alternative way to speed up Load()                            #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+var.rean=ERAint # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+var.name='sfcWind' #'tas' #'prlr' # any daily variable for which we want to find out if it is WT-driven
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), sdates = paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+# Forecast system list:
+S4 <- list(path = ...)
+CFSv2 <- list(path = ...)
+
+# Reanalysis list:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = ...)
+
+# Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+exp.source <- NULL
+obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+# Select one variable and its frequency:
+var.name <- 'sfcWind'
+store.freq <- 'daily'
+
+# Extract only the directory path of the forecast and reanalysis:
+obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+obs.source4 <- strsplit(obs.source3,'/')
+obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+obs.source6 <- list.files(path = obs.source5)
+
+system(paste0("ncks -O -h -d longitude,5,6 /esnas/reconstructions/ecmwf/eraint/", store.freq, "_mean/", var.name, "_f6h"))
+system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+y <- 1990
+var <- Load(var = var.name, exp = list(exp.source), obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')
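+
+# For reference, the gsub chain above resolves the ERAint pattern to the
+# directory "/esnas/reconstructions/ecmwf/eraint/daily_mean/sfcWind_f6h",
+# so obs.source6 holds the monthly file names inside it, e.g.
+# "sfcWind_201405.nc", which can then be subset with ncks as shown.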
# check if it is equal to M/3 (1.333 for M=4) + +# alternative definition of climatological forecast, based on observations instead (as applied by veriApply via convert2prob when option fcst.ref is missing): +#anom.rean.clim<-ClimEns(anom.rean) # create a climatological ensemble from a vector of observations (anom.rean) with the leave-one-out cross validation +anom.rean.clim<-t(array(anom.rean,c(N,N))) # function used by convert2prob when ref is not specified; it is ~ equal to the above function ClimEns, it only has a column more +ens.clim.rean<-convert2prob(anom.rean.clim,prob<-c(1/3,2/3)) # create a new prob.table based on the climatological ensemble of observations + +p<-count2prob(ens) # we should add the option type=4, in not the sum for each year is not 100%!!! +y<-count2prob(obs) # we should add the option type=4, in not the sum for each year is not 100%!!! +ReliabilityDiagram(p[,1], y[,1], bins=10, nboot=0, plot=TRUE, cons.prob=c(0.05, 0.85),plot.refin=F) + +### computation of verification scores: + +Rps<-round(mean(EnsRps(ens,obs)),4) +FairRps<-round(mean(FairRps(ens,obs)),4) +#Rps.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of Rps +#FairRps.easy<-round(mean(veriApply("FairRps", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of FairRps + +Rps.clim<-round(mean(EnsRps(ens.clim,obs)),4) +FairRps.clim<-round(mean(FairRps(ens.clim,obs)),4) +FairRps.clim.rean<-round(mean(FairRps(ens.clim.rean,obs)),4) + +#FairRps.clim.stable<-0.44444444 +#Rps.clim.easy<-round(mean(veriApply("EnsRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is the same value of Rps.clim +#FairRps.clim.easy<-round(mean(veriApply("FairRps", fcst=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)),4) # check once to see if it is = to FairRps.clim + +Rpss<-round(1-(Rps/Rps.clim),4) +#Rpss.specs<-round(EnsRpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) # check it once to see if it is the same as Rpss (yes) +Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs +system.time(Rpss.easy.noclim<-round(veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2, parallel=FALSE, ncpus=5)$rpss,4)) # using their climatological forecast +#cat(Rps.clim.noclim<-Rps/(1-Rpss.easy.noclim)) # sale igual a ~0.45 per ogni hindcast; é diverso sia da Rps.clim che da Rps.clim per N-> +oo (0.555) + +Rpss.easy<-veriApply("EnsRpss", fcst=anom.hind, fcst.ref=anom.rean.clim, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs +Rpss.easy.noclim<-veriApply("EnsRpss", fcst=anom.hind, obs=anom.rean, prob=my.prob, tdim=1, ensdim=2)$rpss # we provide the clim.forecast; = to Rpss.specs + +FairRpss<-round(1-(FairRps/FairRps.clim),4) +#FairRpss.specs<-round(FairRpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) # check it once to see if it is the same as FairRpss (yes) +#FairRpss.easy<-veriApply("FairRpss", fcst=anom.hind, fcst.ref=anom.hind.clim, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rps +#FairRpss.easy.noclim<-round(veriApply("FairRpss", fcst=anom.hind, obs=anom.rean, prob=c(1/3,2/3), tdim=1, ensdim=2)$rpss,4) # using their climatological forecast +#cat(FairRps.clim.noclim<-Rps/(1-FairRpss.easy.noclim)) # sale igual a ~0.55; é 
diverso sia da FairRps.clim che da FairRps.clim per N-> +oo (0.444) + +cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n')) + +#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n')) + +#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,'\n','n.memb: ' ,M,' n.yrs: ',N,' : ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,'\n')) + +#cat(paste0('n.memb: ' ,M,' n.yrs: ',N,' : ',Rps,' : ' ,Rps.clim,' Rpss: ',Rpss,' Rpss.easy: ',Rpss.easy.noclim,'\n','n.memb: ' ,M,' n.yrs: ',N,' #: ',FairRps,' : ' ,FairRps.clim,' FairRpss: ',FairRpss,' FairRpss.easy: ',FairRpss.easy.noclim)) + +} + +# Crpss: + +#Crps<-round(mean(EnsCrps(ens,obs)),4) +#FairCrps<-round(mean(FairCrps(ens,obs)),4) + +#cat(Crps.clim<-round(mean(EnsCrps(ens.clim,obs)),4)) +#FairCrps.clim<-round(mean(FairCrps(ens.clim,obs)),4) + +#Crpss<-round(1-(Crps/Crps.clim),4) +#Crpss.specs<-round(EnsCrpss(ens=ens, ens.ref=ens.clim, obs=obs)$rpss,4) + +#FairCrpss<-round(1-(FairCrps/FairCrps.clim),4) +#FairCrpss.specs<-round(FairCrpss(ens=ens,ens.ref=ens.clim,obs=obs)$rpss,4) + +} + + +################################################################################## +# Other analysis # +################################################################################## + +my.startdates=1 #c(1:9) # select the startdates (weeks) you want to merge together + +#tm<-toyarray(dims=c(32,64),N=20,nens=4) # in the number of startdates in case of forecasts or the number of years in case of hindcasts +#str(tm) # tm$fcst: [32,64,20,4] tm$obs: [32,64,20] +# +#f.corr <- veriApply("EnsCorr", fcst=tm$fcst, obs=tm$obs) +#f.me <- veriApply('EnsMe', tm$fcst, tm$obs) +#f.crps <- veriApply("FairCrps", fcst=tm$fcst, obs=tm$obs) +#str(f.crps) # [32,64,20] + +#cst <- array(rnorm(prod(4:8)), 4:8) +#obs <- array(rnorm(prod(4:7)), 4:7) +#f.me <- veriApply('EnsMe', fcst=fcst, obs=obs) +#dim(f.me) +#fcst2 <- array(aperm(fcst, c(5,1,4,2,3)), c(8, 4, 7, 5*6)) # very interesting use of aperm not only to swap dimensions, but also to merge two dimensions in the same one! 
+#obs2 <- array(aperm(obs, c(1,4,2,3)), c(4,7,5*6)) +#f.me2 <- veriApply('EnsMe', fcst=fcst2, obs=obs2, tdim=3, ensdim=1) +#dim(f.me2) + +# when you have for each leadtime all the 52 weeks of year 2014 stored in a map, +# you can plot the time serie for a particular point and leadtime: +pos.lat<-which(round(la,3)==round(my.lat,3)) +pos.lon<-which(round(lo,3)==round(my.lon,3)) + +x11() +plot(1:week,EnMeanCorr[,leadtime],type="b", + main=paste0("Weekly time serie of point with lat=",round(my.lat,2),", lon=",round(my.lon,2)), + xlab="week",ylab="Ensemble Mean Corr.",ylim=c(-1,1)) + + + +### check if 4 time steps are better than one: + +yrs.hind<-yr2.hind-yr1.hind+1 +my.point.obs<-array(NA,c(yrs.hind,28)) + +# load obs.data for the four time steps: +for(year in yr1.hind:yr2.hind){ + data_erai_6h <- Load(var=var.name, exp = 'ERAint6h', + obs = NULL, sdates=paste0(year,'01'), leadtimemin = 61, + leadtimemax = 88, output = 'lonlat', configfile = file_path, grid=my.grid,method='distance-weighted') + + my.point.obs[year-yr1.hind+1,]<-data_erai_6h$mod[1,1,1,,65,633] +} + +utm0<-c(1,5,9,13,17,21,25) +utm6<-utm0+1 +utm12<-utm0+2 +utm18<-utm0+3 +my.points.obs.weekly.mean.utm0<-rowMeans(my.point.obs[,utm0]) +my.points.obs.weekly.mean.utm6<-rowMeans(my.point.obs[,utm6]) +my.points.obs.weekly.mean.utm12<-rowMeans(my.point.obs[,utm12]) +my.points.obs.weekly.mean.utm18<-rowMeans(my.point.obs[,utm18]) +my.points.obs.weekly.mean.all<-rowMeans(my.point.obs) + +cor(my.points.exp.weekly.mean.all,my.points.obs.weekly.mean.all,use="complete.obs") + +my.points.obs.weekly.mean.utm<-c(my.points.obs.weekly.mean.utm0,my.points.obs.weekly.mean.utm6,my.points.obs.weekly.mean.utm12,my.points.obs.weekly.mean.utm18) +my.points.exp.weekly.mean.utm<-c(my.points.exp.weekly.mean.utm0,my.points.exp.weekly.mean.utm6,my.points.exp.weekly.mean.utm12,my.points.exp.weekly.mean.utm18) +cor(my.points.exp.weekly.mean.utm,my.points.obs.weekly.mean.utm,use="complete.obs") + + +##### Calculate Reanalysis climatology loading only 1 file each 4 (it's quick but it needs to have all files beforehand): + +ss<-c(seq(1,52,by=4),50,51,52) # take only 1 startdate each 4, more the last 3 startdates that must be computed individually because they are not a complete set of 4 + +for(startdate in ss){ # the startdate day of 2014 to load in the sdates weekly sequence along with the same startdates of the previous 20 years + data.ERAI <- Load(var=var.name, exp = 'ERAintEnsWeek', + obs = NULL, sdates=paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8)), + nleadtime = 4, leadtimemin = 1, leadtimemax = 4, output = 'lonlat', configfile = file_path, grid=my.grid, method='distance-weighted') + + clim.ERAI[startdate,,,]<-apply(data.ERAI$mod,c(1,2,4,5,6),mean)[1,1,,,] # average for each leadtime and pixel + + # the intermediate startdate between startdate week i and startdate week i+4 are repeated: + if(startdate <= 49){ + clim.ERAI[startdate+1,1,,]<- clim.ERAI[startdate,2,,] # startdate leadtime (week) + clim.ERAI[startdate+1,2,,]<- clim.ERAI[startdate,3,,] # (week) 1 2 3 4 + clim.ERAI[startdate+2,1,,]<- clim.ERAI[startdate,3,,] # 1 a b c d <- only startdate 1 and 5 are necessary to calculate all startdates between 1 and 5 + clim.ERAI[startdate+1,3,,]<- clim.ERAI[startdate,4,,] # 2 b c d e + clim.ERAI[startdate+2,2,,]<- clim.ERAI[startdate,4,,] # 3 c d e f + clim.ERAI[startdate+3,1,,]<- clim.ERAI[startdate,4,,] # 4 d e f g + } # 5 e f g h + + if(startdate > 1 && startdate <=49){ # fill the previous startdates: + 
+
+
+
+# You can make cor() much faster by stripping away all the error-checking code and calling the internal C function directly:
+# r  <- apply(m1, 1, function(x) { apply(m2, 1, function(y) { cor(x,y) })})
+# r2 <- apply(m1, 1, function(x) { apply(m2, 1, function(y) {.Internal(cor(x, y, 4L, FALSE)) })})
+C_cor<-get("C_cor", asNamespace("stats"))
+test<-.Call(C_cor, x=rnorm(100,1), y=rnorm(100,1), na.method=2, FALSE)
+my.EnsCorr.chunk[i,j,k]<-.Call(C_cor, x=anom.hindcast.mean.chunk[,i,j,k], y=anom.rean.chunk[,i,j,k], na.method=2, FALSE)
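+# A quick self-contained check (not in the original notes) that the direct call
+# matches cor(), plus a rough timing; note that C_cor is an internal entry point
+# (na.method=4L is cor()'s default "everything") and may change between R versions:
+x <- rnorm(1000); y <- rnorm(1000)
+stopifnot(all.equal(cor(x, y), .Call(C_cor, x, y, 4L, FALSE)))
+system.time(for (i in 1:10000) cor(x, y))                      # slower: full argument checking
+system.time(for (i in 1:10000) .Call(C_cor, x, y, 4L, FALSE))  # faster: straight to C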
+
+
+
+#######################################################################################
+#                                 Raster Animations                                   #
+#######################################################################################
+
+library(png)
+library(animation)
+
+
+clustPNG <- function(pngobj, nclust = 3){
+
+  j <- pngobj
+  # If there is an alpha component, remove it...
+  # might be better to just adjust the mat matrix
+  if(dim(j)[3] > 3){
+    j <- j[,,1:3]
+  }
+  k <- apply(j, c(1,2), c)
+  mat <- matrix(unlist(k), ncol = 3, byrow = TRUE)
+  grd <- expand.grid(1:dim(j)[1], 1:dim(j)[2])
+  clust <- kmeans(mat, nclust, iter.max = 20)
+  new <- clust$centers[clust$cluster,]
+
+  for(i in 1:3){
+    tmp <- matrix(new[,i], nrow = dim(j)[1])
+    j[,,i] <- tmp
+  }
+
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+}
+
+makeAni <- function(file, n = 10){
+  j <- readPNG(file)
+  for(i in 1:n){
+    clustPNG(j, i)
+    mtext(paste("Number of clusters:", i))
+  }
+
+  j <- readPNG(file)
+  plot.new()
+  rasterImage(j, 0, 0, 1, 1, interpolate = FALSE)
+  mtext("True Picture")
+}
+
+file <- "~/Dropbox/Photos/ClassyDogCropPng.png"
+n <- 20
+saveGIF(makeAni(file, n), movie.name = "tmp.gif", interval = c(rep(1,n), 5))
+
+
+
+
+#######################################################################################
+#                                    ProgressBar                                      #
+#######################################################################################
+
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  # Dirk says using cat() like this is naughty ;-)
+  #cat(i,"\r")
+  # So you can use message() like this, thanks to Sharpie's
+  # comment to use appendLF=FALSE.
+  message(i,"\r",appendLF=FALSE)
+  flush.console()
+}
+
+
+# the naughty cat() version, for comparison:
+for(i in 1:10) {
+  Sys.sleep(0.2)
+  cat(i,"\r")
+}
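+# Base R also ships a ready-made progress bar; a minimal alternative (not in the
+# original notes) to the cat()/message() tricks above:
+pb <- txtProgressBar(min = 0, max = 10, style = 3)
+for (i in 1:10) { Sys.sleep(0.2); setTxtProgressBar(pb, i) }
+close(pb)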
+
+
+
+#######################################################################################
+#                           Load large datasets with ff                               #
+#######################################################################################
+
+library(ff)
+my.scratch<-"/scratch/Earth/ncortesi/myBigData"
+anom.hind<-as.ff(anom.hind, vmode="double", file=my.scratch)
+ffsave(anom.hind, file=my.scratch)
+close(anom.hind); rm(anom.hind)
+
+ffload(file=my.scratch, list="anom.hind")  # (list= takes the object names as character strings)
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3))
+close(anom.hind); rm(anom.hind)
+
+
+
+BigDataPath <- "/scratch/Earth/ncortesi/myBigData"
+source('/home/Earth/ncortesi/Downloads/RESILIENCE/veriApplyBig.R')
+
+anom.hind.dim<-c(5,3,1,256,512)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)  # doesn't fit in memory
+save.big(array=anom.hind, path=BigDataPath)
+
+# wrapper: load the ff arrays from disk and compute the skill score
+skill.from.disk <- function(my.scratch){
+  ffload(file=my.scratch)
+  open(anom.hind)
+  my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3))
+  close(anom.hind)
+  my.SkillScore
+}
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-ff(rnorm(prod(anom.hind.dim)), dim=anom.hind.dim, vmode="double", filename="/scratch/Earth/ncortesi/anomhind")
+str(anom.hind)
+dim(anom.hind)
+
+anom.hind.dim<-c(51,30,1,256,51)
+anom.hind<-array(rnorm(prod(anom.hind.dim)),anom.hind.dim)  # doesn't fit in memory
+anom.hind<-as.ff(anom.hind, vmode="double", file="/scratch/Earth/ncortesi/myBigData")
+str(anom.hind)
+dim(anom.hind)
+
+ffsave(anom.hind,file="/scratch/Earth/ncortesi/anomhind")
+#ffinfo(file="/scratch/Earth/ncortesi/anomhind")
+close(anom.hind)
+#delete(anom.hind)
+#rm(anom.hind)
+
+ffload(file="/scratch/Earth/ncortesi/anomhind")
+
+anom.rean<-array(rnorm(prod(anom.hind.dim[-1])), anom.hind.dim[-1])  # doesn't fit in memory
+
+open(anom.hind)
+my.SkillScore <- veriApplyBig("FairRpss", fcst=anom.hind, obs=anom.rean, tdim=2, ensdim=1, prob=c(1/3,1/3))
+fcst<-anom.hind
+obs<-anom.rean
+
+close(anom.hind)
+rm(anom.hind)
+
+#ffdrop(file="/scratch/Earth/ncortesi/anomhind")
+
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=mean, RETURN=TRUE)
+a<-ffapply(X=anom.hind.big, MARGIN=c(1,3,4,5), AFUN=function(x) sample(x, replace=TRUE), CFUN="list", RETURN=TRUE)
+
+
+pred_Cal_cross_ff <- as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross)
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+ffsave(pred_Cal_cross_ff, file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+assign("pred_Cal_cross_ff", as.ff(pred_Cal_cross, vmode='double', file=fileoutput_cross))
+a<-get("pred_Cal_cross_ff")  # get() works well in this case, but not in the line below!
+                             # (you must use do.call, see WT_drivers_v2.R)
+ffsave(get("pred_Cal_cross_ff"), file=paste0(fileoutput_cross))
+delete(pred_Cal_cross_ff)
+rm(pred_Cal_cross_ff)
+
+
+
+#########################################################################################
+#  Calculate and save the FairRpss and/or the FairCrpss for a chosen period using ff    #
+#########################################################################################
+
+bigdata=FALSE  # if TRUE, compute the Rpss and the Crpss using the package ff (storing arrays on disk instead of in RAM) to remove the memory limit
+
+for(month in veri.month){
+  month=1  # for debug
+  my.startdates <- which(as.integer(substr(sdates.seq,5,6)) == month)  #startdates.monthly[[month]] #c(1:5)  # select the startdates (weeks) you want to compute
+  startdate.name=my.month[month]  # name given to the period of the selected startdates, i.e: "January"
+  n.yrs <- length(my.startdates)*n.yrs.hind  # total number of years for the selected month
+  print(paste0("Computing Skill Scores for ",startdate.name))
+
+  if(!boot) anom.rean.big<-array(NA, dim=c(n.yrs, n.leadtimes, n.lat, n.lon))  # reanalysis data is not converted to ff because it is a small array
+
+  if(boot) mod<-1 else mod=0  # in case of the bootstrap, anom.hind.big also includes the reanalysis data as an additional last member
+
+  if(bigdata) anom.hind.big <- ff(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbig.ff"))
+  if(!bigdata) anom.hind.big <- array(NA, dim=c(n.members + mod, n.yrs, n.leadtimes, n.lat, n.lon))
+
+  for(startdate in my.startdates){
+
+    pos.startdate<-which(startdate==my.startdates)  # count of the number of startdates already loaded + 1
+    my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind)  # time interval where to load data
+
+    load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+    if(!boot) {
+      anom.rean<-drop(anom.rean)
+      anom.rean.big[my.time.interv,,,] <- anom.rean
+    }
+
+    if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){
+      load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))  # load hindcast data
+      anom.hindcast<-drop(anom.hindcast)
+
+      if(!boot) anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+      if(boot) anom.hind.big[,my.time.interv,,,] <- abind(anom.hindcast, anom.rean, along=1)
+
+      rm(anom.hindcast, anom.rean)
+    }
+    gc()
+  }
+
+  if(any(my.score.name=="FairRpss") || any(my.score.name=="FairCrpss")){  # (|| acts only on the first elements, so it must stay outside any())
+    if(!boot) {
+      if(any(my.score.name=="FairRpss")) my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big,
+                                                                     prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+      if(any(my.score.name=="FairCrpss")) my.FairCrpss <- veriApplyBig("FairCrpss", fcst=anom.hind.big, obs=anom.rean.big, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+    } else {
+      # bootstrapping:
+      my.FairRpssBoot<-my.FairCrpssBoot<-array(NA, c(n.boot, n.leadtimes, n.lat, n.lon))
+      if(!bigdata) save(anom.hind.big, file=paste0(workdir,"/anom_hind_big.RData"))  # save the anomalies to free memory
+
+      for(b in 1:n.boot){
+        print(paste0("resampling n. ",b,"/",n.boot))
+        yrs.sampled <- sample(1:n.yrs, replace=TRUE)
+
+        if(bigdata) anom.hind.big.sampled <- ff(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon), vmode="double", filename=paste0(workdir,"/anomhindbigsampled"))
+        if(!bigdata) anom.hind.big.sampled <- array(NA, dim=c(n.members+1, n.yrs, n.leadtimes, n.lat, n.lon))
+
+        if(!bigdata && b>1) load(paste0(workdir,"/anom_hind_big.RData"))  # need to load the hindcasts again if b>1
+
+        for(y in 1:n.yrs) anom.hind.big.sampled[,y,,,] <- anom.hind.big[,yrs.sampled[y],,,]  # with ff it takes 53s x 100 ~1h!!! (without, only 4s x 100 ~6m)
+
+        if(!bigdata) rm(anom.hind.big); gc()  # free memory for the following commands
+
+        # faster way that will be able to replace the above one when the ffapply output parameters will be improved:
+        #a<-ffapply(X=anom.hind.big, MARGIN=c(3,4,5), AFUN=function(x) my.sample(x,n.members+1, replace=TRUE), CFUN="list", RETURN=TRUE, FF_RETURN=TRUE)  # takes 3m only
+        #aa<-abind(a[1:10], along=4)  # it doesn't fit into memory!
+
+        if(any(my.score.name=="FairRpss")) my.FairRpssBoot[b,,,] <- veriApplyBig("FairRpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                 obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                 prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)  #$rpss
+
+        if(any(my.score.name=="FairCrpss")) my.FairCrpssBoot[b,,,] <- veriApplyBig("FairCrpss", fcst=anom.hind.big.sampled[1:n.members,,,,],
+                                                                                   obs=anom.hind.big.sampled[n.members+1,,,,],
+                                                                                   tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)  #$crpss
+
+        if(bigdata) delete(anom.hind.big.sampled)
+        rm(anom.hind.big.sampled)  # delete the sampled hindcast
+
+      }
+
+      # calculate the 5 and 95 percentiles of the skill scores:
+      if(any(my.score.name=="FairRpss")) my.FairRpssConf <- apply(my.FairRpssBoot, c(2,3,4), function(x) quantile(x, probs=c(0.05,0.95), type=8))
+      if(any(my.score.name=="FairCrpss")) my.FairCrpssConf <- quantile(my.FairCrpssBoot, probs=c(0.05,0.95), type=8)
+
+    }  # close if on boot
+
+  }
+
+  if(bigdata) delete(anom.hind.big)  # delete the file with the hindcasts
+  rm(anom.hind.big)
+  rm(anom.rean.big)
+  gc()
+
+  if(!boot){
+    if(any(my.score.name=="FairRpss")) save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpss, file=paste0(workdir,'/Data_',var.name,'/FairCrpss_',startdate.name,'.RData'))
+  } else {
+    if(any(my.score.name=="FairRpss")) save(my.FairRpssBoot, my.FairRpssConf, file=paste0(workdir,'/Data_',var.name,'/FairRpssBoot_',startdate.name,'.RData'))
+    if(any(my.score.name=="FairCrpss")) save(my.FairCrpssBoot, my.FairCrpssConf, file=paste0(workdir,'/Data_',var.name,'/FairCrpssBoot_',startdate.name,'.RData'))
+  }
+
+  if(is.object(my.FairRpss)) rm(my.FairRpss); if(is.object(my.FairCrpss)) rm(my.FairCrpss)
+  if(is.object(my.FairRpssBoot)) rm(my.FairRpssBoot); if(is.object(my.FairCrpssBoot)) rm(my.FairCrpssBoot)
+  if(is.object(my.FairRpssConf)) rm(my.FairRpssConf); if(is.object(my.FairCrpssConf)) rm(my.FairCrpssConf)
+
+}  # next m (month)
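+# A compact sketch (toy data, not the original implementation) of the bootstrap
+# logic above: resample the years with replacement, recompute the score each time,
+# and take the 5-95% quantiles as the confidence interval; my.score() stands in
+# for the veriApplyBig() call:
+set.seed(1)
+scores.by.year <- rnorm(30, mean = 0.2, sd = 0.1)     # toy per-year scores
+my.score <- function(yrs) mean(scores.by.year[yrs])   # placeholder skill measure
+boot.scores <- replicate(1000, my.score(sample(1:30, replace = TRUE)))
+quantile(boot.scores, probs = c(0.05, 0.95), type = 8)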
+
+
+#########################################################################################
+#       Calculate and save the FairRpss for a chosen period using ff and Load()         #
+#########################################################################################
+
+anom.rean.big<-array(NA, dim=c(length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon))
+anom.hind.big <- ff(NA, dim=c(n.members,length(my.startdates)*n.yrs.hind, n.leadtimes, n.lat, n.lon), vmode="double", filename="/scratch/Earth/anomhindbig")
+
+for(startdate in my.startdates){
+  pos.startdate<-which(startdate==my.startdates)  # count of the number of startdates already loaded + 1
+  my.time.interv<-(1+(pos.startdate-1)*n.yrs.hind):(pos.startdate*n.yrs.hind)  # time interval where to load data
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_rean_startdate',startdate,'.RData'))
+  #anom.rean<-drop(anom.rean)
+  #anom.rean.big[my.time.interv,,,] <- anom.rean
+
+  my.date <- paste0(yr1.hind:yr2.hind,substr(sdates.seq[startdate],5,6),substr(sdates.seq[startdate],7,8))
+  anom.rean <- Load(var=var.name, exp = 'EnsEcmwfWeekHind',
+                    obs = NULL, sdates=my.date, nleadtime = n.leadtimes, leadtimemin = 1, leadtimemax = n.leadtimes,
+                    output = 'lonlat', configfile = file_path, method='distance-weighted' )
+
+  #load(paste0(workdir,'/Data_',var.name,'/anom_hindcast_startdate',startdate,'.RData'))  # load hindcast data
+  #anom.hindcast<-drop(anom.hindcast)
+
+  anom.hind.big[,my.time.interv,,,] <- anom.hindcast
+  rm(anom.hindcast)
+}
+
+my.FairRpss <- veriApplyBig("FairRpss", fcst=anom.hind.big, obs=anom.rean.big, prob=my.prob, tdim=2, ensdim=1, parallel=TRUE, ncpus=n.cpus)
+
+save(my.FairRpss, file=paste0(workdir,'/Data_',var.name,'/FairRpss_',startdate.name,'.RData'))
+
+
+#########################################################################################
+#                        Alternative way to speed up Load()                             #
+#########################################################################################
+
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+var.rean=ERAint  # daily reanalysis dataset used for the var data; it can have a different resolution from the MSLP dataset
+var.name='sfcWind'  #'tas' #'prlr'  # any daily variable we want to test for being WT-driven
+var <- Load(var = var.name, exp = NULL, obs = list(var.rean), paste0(y,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+# Forecast system list:
+S4 <- list(path = ...)
+CFSv2 <- list(path = ...)
+
+# Reanalysis list:
+ERAint <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+JRA55 <- list(path = ...)
+
+
+
+
+# Select one forecast system and one reanalysis (put NULL if you don't need to load any experiment/observation):
+exp.source <- NULL
+obs.source <- list(path = '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc')
+
+# Select one variable and its frequency:
+var.name <- 'sfcWind'
+store.freq <- 'daily'
+
+# Extract only the directory path of the forecast and reanalysis:
+obs.source2 <- gsub("\\$STORE_FREQ\\$", store.freq, obs.source$path)
+obs.source3 <- gsub("\\$VAR_NAME\\$", var.name, obs.source2)
+obs.source4 <- strsplit(obs.source3,'/')
+obs.source5 <- paste0(obs.source4[[1]][-length(obs.source4[[1]])], collapse="/")
+
+obs.source6 <- list.files(path = obs.source5)
+
+system(paste0("ncks -O -h -d longitude,5,6 ", '/esnas/reconstructions/ecmwf/eraint/',store.freq,'_mean/',var.name,'_f6h'))
+system("ncks -O -h -d longitude,5,6 sfcWind_201405.nc ~/test.nc")
+
+y <- 1990
+var <- Load(var = var.name, exp = NULL, obs = list(obs.source), sdates = paste0(y,'0101'), storefreq = store.freq, leadtimemax = 1, output = 'lonlat')  # (exp.source is NULL here; wrap it in list() if you set a real path)
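+# A small helper sketch (not in the original notes) bundling the $KEY$ substitutions
+# above into one function, using the same placeholder convention as the Load() paths:
+expand.path <- function(template, keys){
+  for(k in names(keys)) template <- gsub(paste0("\\$", k, "\\$"), keys[[k]], template)
+  template
+}
+expand.path('/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc',
+            list(STORE_FREQ='daily', VAR_NAME='sfcWind', YEAR='1990', MONTH='01'))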
+
+}  # close if on mare
+
+
diff --git a/weather_regimes_impact_v4.R b/weather_regimes_impact_v4.R
new file mode 100644
index 0000000000000000000000000000000000000000..035e96c0ce5bd6233df57b9c59d865e384dbe4c3
--- /dev/null
+++ b/weather_regimes_impact_v4.R
@@ -0,0 +1,541 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+# Aim: measure the impact of Weather Regimes on a chosen variable, from a reanalysis or from a forecast system.
+#
+# I/O: input are all the various "*_psl.RData" files created by the script 'weather_regimes.R';
+# its output are "*_.RData" files, which are needed by the script weather_regimes_maps.R.
+# You can also run this script from the terminal with: Rscript ~/scripts/weather_regimes_impact.R
+#
+# Assumption: all input/output files are located in the 'workdir' folder. Note that this script should not be run with many parallel jobs, since it spends most of its time
+# loading data; it's better to take advantage of the parallel loading feature of Load() and run the script once, computing the monthly analyses sequentially.
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification)  # for the function Load()
+library(abind)
+library(reshape2)  # after each update of s2dverification, it's better to remove and reinstall this package, because sometimes it gets broken!
+#library(TTR)
+source('/home/Earth/ncortesi/scripts/Rfunctions.R')  # for the calendar functions
+#workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes"  # working dir
+# module load NCO  # load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/EDPR_February_2017"  # (overrides the line above)
+
+forecast.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+# available reanalyses for the var data:
+#ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint.name <- "ERA-Interim"
+
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55.name <- "JRA-55"
+
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+NCEP.name <- "NCEP"
+
+rean <- JRA55  #JRA55 #ERAint #NCEP  # choose one of the reanalyses above from which to load the input psl data
+
+# available forecast systems for the psl and var data:
+#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'))
+#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc'
+#ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'
+ECMWF_S4 <- '/esarchive/old-files/exp_ecmwf_system4_m1/$STORE_FREQ$_mean/old/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc'  # only for daily tas S4
+
+ECMWF_S4.name <- "ECMWF-S4"
+member.name <- "ensemble"  # name of the dimension with the ensemble members inside the netCDF files of S4
+lat.name <- "latitude"
+lon.name <- "longitude"
+
+ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'
+ECMWF_monthly.name <- "ECMWF-Monthly"
+
+forecast <- ECMWF_S4
+
+# set it to 'rean' to load pressure fields and var from a reanalysis, or to 'forecast' to load them from a forecast system:
+fields <- rean  #forecast #rean
+
+var.num <- 1  # Choose a variable. 1: sfcWind  2: tas
+var.name <- c("sfcWind","tas")  # name of the 'predictand' variable of the chosen reanalysis
+var.name.full <- c("wind speed","temperature")  # full name of the 'predictand' variable, for the titles of the graphs
+var.unit <- c("m/s", "ºC")  # unit of measure of var (for drawing color scales)
+
+WR.period <- 2  #1:12 #13:16  # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn.  For forecasts, it is the start month
+
+# the following variables have to be the same as in the weather_regimes_vXX.R script that processed the psl data:
+year.start <- 1981  #1979 #1981 #1982 #1981  # specify the first year of var data
+year.end <- 2017  #2016 #2015 #2013 #2010  # specify the last year of var data
+
+# Only for seasonal forecasts:
+startM <- 3  # start month (1=January, 12=December)  [bypassed by the optional arguments of the script]
+leadM <- 6  # select only the data of one lead month  [bypassed by the optional arguments of the script]
+n.members <- 15  # number of members of the forecast system to load
+n.leadtimes <- 216  # number of leadtimes of the forecast system to load
+missing.forecasts=FALSE  # set it to TRUE if there can be missing hindcast files in the forecast var data, so that only the available years are loaded and NAs are handled
+res <- 0.75  # set the resolution to interpolate the var data to when loading it (e.g. set to 0.75 to use the same res as ERA-Interim)
+             # interpolation method is BILINEAR.
+
+# Coordinates of the box enclosing the study region (the Northern Atlantic in this case):
+lat.max <- 81  #81 #80 #70
+lat.min <- 27  #30 #20 #30
+lon.max <- 45  #36 #30 #40
+lon.min <- 274.5  #274.5 #270 #280  # put a positive number here because the geopotential has only positive values of longitude!
+
+#############################################################################################################################
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc'))
+#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc'))
+
+rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name)))
+forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name))
+fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name))
+
+# if the script is run with one argument, a reanalysis is assumed, and the argument is the month for which to perform the clustering;
+# if the script is run with two arguments, a forecast is assumed: the first argument is the start month and the second one the lead month;
+# if the script is run with no arguments, the values of the variables inside the script are used:
+script.arg <- as.integer(commandArgs(TRUE))
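+# For example (hypothetical invocations, following the conventions above):
+#   Rscript weather_regimes_impact_v4.R 2      # reanalysis, compute month 2 (February)
+#   Rscript weather_regimes_impact_v4.R 3 2    # forecast, start month 3, lead month 2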
+
+# in case the script is run with no arguments:
+if(length(script.arg) == 0 && fields.name == forecast.name){
+  start.month <- startM
+  #WR.period <- start.month
+  lead.month <- leadM
+}
+
+# in case the script is run with 1 argument, it is assumed you are using a reanalysis, so the variable to change is the period:
+if(length(script.arg) == 1){
+  fields <- rean
+  fields.name <- rean.name
+  WR.period <- script.arg[1]
+}
+
+# in case the script is run with 2 arguments, it is assumed you are using forecasts, so the variables to change are the start date and the lead time:
+if(length(script.arg) >= 2){
+  fields <- forecast
+  fields.name <- forecast.name
+  start.month <- script.arg[1]
+  lead.month <- script.arg[2]
+  WR.period <- start.month  # it becomes just a copy of the start.month
+}
+
+if(length(script.arg) == 3){  # in this case, we are running the script in the SMP machine and we need to override the variables below:
+  source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R')  # for the calendar functions
+  #workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE"  # working dir
+}
+
+if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""}
+
+cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n"))
+
+#if(fields.name == forecast.name) WR.period = 1
+
+# Create a regular lat/lon grid to interpolate the data to the chosen res (notice that the regular grid is always centered at lat=0 and lon=0!):
+n.lon.grid <- 360/res
+n.lat.grid <- (180/res)+1
+my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid)
+
+## days.period <- n.days.period <- period.length <- list()
+## for (pp in 1:17){
+##   days.period[[pp]] <- NA
+##   for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp))
+##   days.period[[pp]] <- days.period[[pp]][-1]  # remove the NA at the beginning, introduced only to be able to execute the above command
+##   # number of days belonging to that period from year.start to year.end:
+##   n.days.period[[pp]] <- length(days.period[[pp]])
+##   # number of days belonging to that period in a single year:
+##   period.length[[pp]] <- n.days.in.a.period(pp,1999)  #+ ifelse(period==13 | period==2, 1, 0)  # using year 1999 introduces a small error in the winter season length because of leap years
+## }
+
+# Load var data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+# Load var data:
+if(fields.name == rean.name){  # Load daily var data in the reanalysis case:
+  info.period <- WR.period[1]  # period used to get the variables n.years, lat.min, lat.max, lon.min and lon.max
+  WR.period.old <- WR.period
+
+  load(file=paste0(rean.dir,"/",fields.name,"_",my.period[info.period],"_psl.RData"))
+  if(!identical(WR.period, WR.period.old)) WR.period <- WR.period.old
+
+  n.years <- year.end - year.start + 1
+
+  #for (y in year.start:year.end){
+  #  var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat',
+  #              latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+  #  vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs
+  #  rm(var)
+  #  gc()
+  #}
+
+  vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  vareuFull <-array(NA,c(1,1,n.years,365, dim(vareuFull366)[5], dim(vareuFull366)[6]))
+
+  # remove leap days (29 February) so all arrays have the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  vareuFull[,,,,,] <- vareuFull366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y.idx <- y - year.start + 1  # index of year y within the array (n.days.in.a.year() must receive the actual year, not the index)
+    if(n.days.in.a.year(y) == 366) vareuFull[,,y.idx,60:365,,] <- vareuFull366[,,y.idx,61:366,,]  # take the March-to-December period shifted one day, removing the 29th of February
+  }
+
+  rm(vareuFull366)
+  gc()
+
+  # convert var into daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. Please wait...\n")
+  varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T)
+
+  varPeriodClimLoess <- varPeriodClim
+  for(i in 1:n.pos.lat){    # loop over every grid point (assuming n.pos.lat/n.pos.lon, loaded from the psl.RData, are grid-point counts)
+    for(j in 1:n.pos.lon){
+      my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      varPeriodClimLoess[1,1,,i,j] <- predict(my.loess)
+    }
+  }
+
+  rm(varPeriodClim)
+  gc()
+
+  varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years)
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  vareuFull <- vareuFull - varPeriodClim2
+  #rm(varPeriodClim2)
+  gc()
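+  # A self-contained sketch (toy data, not the script's arrays) of the LOESS
+  # smoothing applied above to each grid point's 365-day mean climatology:
+  #   raw.clim <- 10 + 5*sin(2*pi*(1:365)/365) + rnorm(365)   # noisy seasonal cycle
+  #   fit <- loess(clim ~ day, data.frame(clim=raw.clim, day=1:365), span=0.35)
+  #   smooth.clim <- predict(fit)                              # smoothed daily climatology
+  #   plot(1:365, raw.clim, type="l", col="grey"); lines(1:365, smooth.clim, lwd=2)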
+
+  for(p in WR.period){
+    #p=1  # for the debug
+
+    ## load regime data for the chosen month:
+    varPeriod <- vareuFull[1,1,,pos.period(1,p),,]  # select only var data during the chosen period
+    varPeriodClim <- varPeriodClim2[1,1,,pos.period(1,p),,]
+    varPeriodRel <- varPeriod / varPeriodClim
+
+    load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    cluster.sequence <- my.cluster$cluster  # old syntax: my.cluster[[p]]$cluster
+
+    seasonal.data <- ifelse(length(my.cluster$cluster)/n.years > 33, TRUE, FALSE)  # more than 33 days per year means we are loading 3-month sequences
+    if(seasonal.data == TRUE && p < 13){  # select only the days of the 3-month cluster series inside the target month p
+      n.days.period <- length(pos.month.extended(2001,p))  # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      cluster.sequence <- my.cluster$cluster[days.month.full]  # select only the days of the cluster series inside the target month p
+
+    }
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    gc()
+
+    cat("Formatting data. Please wait...\n")
+    var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    var.meltedRel <- melt(varPeriodRel[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon"))
+    gc()
+
+    cat("Formatting data. Please wait......\n")
+    varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon))
+    varmatRel <- unname(acast(var.meltedRel, Year + Day ~ Lat ~ Lon))
+
+    varwr1 <- varmat[wr1,,,drop=F]
+    varwr2 <- varmat[wr2,,,drop=F]
+    varwr3 <- varmat[wr3,,,drop=F]
+    varwr4 <- varmat[wr4,,,drop=F]
+
+    varwr1Rel <- varmatRel[wr1,,,drop=F]
+    varwr2Rel <- varmatRel[wr2,,,drop=F]
+    varwr3Rel <- varmatRel[wr3,,,drop=F]
+    varwr4Rel <- varmatRel[wr4,,,drop=F]
+
+    varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T)
+    varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T)
+    varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T)
+    varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T)
+
+    varwr1meanRel <- apply(varwr1Rel,c(2,3),mean,na.rm=T)
+    varwr2meanRel <- apply(varwr2Rel,c(2,3),mean,na.rm=T)
+    varwr3meanRel <- apply(varwr3Rel,c(2,3),mean,na.rm=T)
+    varwr4meanRel <- apply(varwr4Rel,c(2,3),mean,na.rm=T)
+
+    n.datos <- n.years * n.days.in.a.period(p,2001)
+
+    varwrBoth1 <- abind(varmat, varwr1, along = 1)
+    pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth1, varwr1)
+    gc()
+
+    varwrBoth2 <- abind(varmat, varwr2, along = 1)
+    pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth2, varwr2)
+    gc()
+
+    varwrBoth3 <- abind(varmat, varwr3, along = 1)
+    pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth3, varwr3)
+    gc()
+
+    varwrBoth4 <- abind(varmat, varwr4, along = 1)
+    pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth4, varwr4)
+
+    rm(varmat, varmatRel)
+    gc()
+
+    #EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon)))  # restrict area to continental Europe only
+    #my.brks.var <- seq(-3,3,0.5)
+    #my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1)  # blue--white--red colors
+    #PlotEquiMap2(rescale(varwr3mean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, continents.col="black")
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, varwr1meanRel, varwr2meanRel, varwr3meanRel, varwr4meanRel, file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+    rm(pvalue1,pvalue2,pvalue3,pvalue4)
+    rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+    gc()
+
+  }  # close p on WR.period
+
+}
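+# A toy sketch (not the script's data) of the per-gridpoint significance test used
+# above: for each grid cell, a t-test between all days and the regime-1 days only:
+#   all.days <- array(rnorm(200*5*6), c(200, 5, 6))   # [day, lat, lon]
+#   regime   <- sample(1:4, 200, replace = TRUE)      # toy cluster sequence
+#   wr.days  <- all.days[regime == 1, , , drop = FALSE]
+#   both     <- abind(all.days, wr.days, along = 1)
+#   pval     <- apply(both, c(2,3), function(x) t.test(x[1:200], x[201:length(x)])$p.value)
+#   dim(pval)  # 5 x 6 map of p-values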
+
+
+
+if(fields.name == ECMWF_S4.name){  # for forecasts, we can only load the data for 1 month at a time (since each month needs ~20 GB of memory to load all its leadtimes)
+  for(p in WR.period){
+    #p=1  # for the debug
+
+    start.month <- p
+    if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month}
+
+    fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+    fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2)
+
+    years <- c()
+
+    for(y in year.start:year.end){
+      fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+      if(file.exists(fields4)){
+        # beware: this way it can only handle leadtimes from 100 to 999, but it is the only way on the SMP machine:
+        #char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+        #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+        num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'time dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))  # doesn't work well on the SMP
+        #num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+
+        if(fields == forecast && forecast == ECMWF_S4 && var.name[var.num] == "sfcWind") {  # sfcWind daily data of S4 has a different formatting:
+          num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ',' -f1"), intern=T))
+        } else {
+          num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) }
+
+        #num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+        num.lat1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'latitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lat2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lat dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lat <- ifelse(length(num.lat2) == 0, num.lat1, num.lat2)
+
+        #num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+        num.lon1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'longitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lon2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lon dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+        num.lon <- ifelse(length(num.lon2) == 0, num.lon1, num.lon2)
+
+        num.var <- system(paste0("ncks -m ",fields4,"| grep -E -i '", var.name[var.num], "'"), intern=T)
+
+        if(length(num.lead) != 0 && length(num.memb) != 0 && length(num.lat) != 0 && length(num.lon) != 0 && length(num.var) != 0){
+          if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+        }
+      }
+    }
+
+    n.years.full <- length(years)  # years actually available for that month
+    print(paste0(n.years.full, " available years for ", my.period[p]))
+
+    vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(lon=lon.name, lat=lat.name, member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
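+    # An alternative sketch of the file checks above using the ncdf4 package instead
+    # of shelling out to ncks (assumes the same dimension names as in the S4 files):
+    #   library(ncdf4)
+    #   check.s4.file <- function(f, var, n.leadtimes, n.members){
+    #     nc <- nc_open(f)
+    #     ok <- !is.null(nc$var[[var]]) &&
+    #           nc$dim[["time"]]$len == n.leadtimes &&
+    #           nc$dim[[member.name]]$len >= n.members
+    #     nc_close(nc)
+    #     ok
+    #   }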
+
+    # convert var into daily anomalies with the LOESS filter:
+    cat("Calculating anomalies. Please wait...\n")
+    varPeriodClim <- apply(vareuFull, c(1,4,5,6), mean, na.rm=T)
+    varPeriodClimLoess <- varPeriodClim
+
+    n.pos.lat <- unname(dim(varPeriodClim)[3])
+    n.pos.lon <- unname(dim(varPeriodClim)[4])
+
+    for(i in 1:n.pos.lat){  # loop over every grid point
+      for(j in 1:n.pos.lon){
+        my.data <- data.frame(ens.mean=varPeriodClim[1,,i,j], day=1:n.leadtimes)
+        my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+        varPeriodClimLoess[1,,i,j] <- predict(my.loess)
+      }
+    }
+
+    rm(varPeriodClim)
+    gc()
+
+    varPeriodClim2 <- InsertDim(InsertDim(varPeriodClimLoess, 2, n.years.full), 2, n.members)
+
+    ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+    ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+    ## s4.pred <- predict(s4.loess)
+
+    vareuFull <- vareuFull - varPeriodClim2
+    rm(varPeriodClim2)
+    gc()
+
+    #if(psl=="g500") psleuFull <- psleuFull/9.81  # convert geopotential to geopotential height (in m)
+    #if(psl=="psl") psleuFull <- psleuFull/100  # convert MSLP in Pascal to MSLP in hPa
+    gc()
+
+    ###### impact maps:
+
+    load.month <- p + lead.month
+    if(load.month > 12) load.month <- load.month - 12
+
+    # select only the var data of the chosen lead month, removing leap days!
+    varPeriod <- array(NA,c(dim(vareuFull)[1:3],length(pos.period(2001,load.month)),dim(vareuFull)[5:6]))
+
+    i <- 1
+    for(y in years){
+      #y.mod <- y
+      #if((p + lead.month) > 12) y.mod <- y + 1
+      #pos.per <- pos.period(y.mod, load.month) - ifelse(p == 1, 0, length(c(pos.months.before(y, p-1), pos.period(y, p-1))))
+
+      if(lead.month == 0) pos.per <- 1:ndm(p,y)
+      if(lead.month == 1) pos.per <- ndm(p,y) + 1:ndm(p + 1,y)
+      if(lead.month == 2) pos.per <- ndm(p,y) + ndm(p + 1,y) + 1:ndm(p + 2,y)
+      if(lead.month == 3) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + 1:ndm(p + 3,y)
+      if(lead.month == 4) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + ndm(p + 3,y) + 1:ndm(p + 4,y)
+      if(lead.month == 5) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + ndm(p + 3,y) + ndm(p + 4,y) + 1:ndm(p + 5,y)
+      if(lead.month == 6) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + ndm(p + 3,y) + ndm(p + 4,y) + ndm(p + 5,y) + 1:ndm(p + 6,y)
+
+      if(length(pos.per) == 29) {
+        #febmod = length(pos.period(y.mod,load.month))
+        varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per[-29],,]
+      } else {
+        varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per,,]
+      }
+
+      i <- i+1
+    }
+
+    gc()
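+    # (Note, not in the original: the lead.month ladder above is equivalent to
+    #    offset  <- if(lead.month == 0) 0 else sum(sapply(0:(lead.month-1), function(k) ndm(p + k, y)))
+    #    pos.per <- offset + 1:ndm(p + lead.month, y)
+    #  with ndm() the script's days-in-month helper.)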
+
+    years.var <- years
+    load(file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))  # load the cluster sequence for the chosen startdate and leadtime
+    #cluster.sequence <- my.cluster[[p]]$cluster  #as.vector(my.cluster.array[[p]])  # no leap days are left, only the years can be different!
+    if(length(years.var) < length(years)) {
+      print("Warning: there are more psl years than var years. Removing years not in common...")
+      ss <- which(is.na(match(years,years.var)))  # remove psl years not present in the var data
+      my.cluster.array2 <- my.cluster.array[[p]][,-ss,]  # (no leap days are left in my.cluster.array)
+    } else {
+      my.cluster.array2 <- my.cluster.array[[p]]  # (no leap days are left in my.cluster.array)
+    }
+
+    wr1 <- which(my.cluster.array2 == 1, arr.ind=T)
+    wr2 <- which(my.cluster.array2 == 2, arr.ind=T)
+    wr3 <- which(my.cluster.array2 == 3, arr.ind=T)
+    wr4 <- which(my.cluster.array2 == 4, arr.ind=T)
+
+    var1 <- var2 <- var3 <- var4 <- array(NA,c(dim(varPeriod)))
+
+    for (i in 1:dim(wr1)[1]) var1[1, wr1[i,3], wr1[i,2], wr1[i,1],,] <- varPeriod[1, wr1[i,3], wr1[i,2], wr1[i,1],,]
+    for (i in 1:dim(wr2)[1]) var2[1, wr2[i,3], wr2[i,2], wr2[i,1],,] <- varPeriod[1, wr2[i,3], wr2[i,2], wr2[i,1],,]
+    for (i in 1:dim(wr3)[1]) var3[1, wr3[i,3], wr3[i,2], wr3[i,1],,] <- varPeriod[1, wr3[i,3], wr3[i,2], wr3[i,1],,]
+    for (i in 1:dim(wr4)[1]) var4[1, wr4[i,3], wr4[i,2], wr4[i,1],,] <- varPeriod[1, wr4[i,3], wr4[i,2], wr4[i,1],,]
+
+    varwr1mean <- apply(var1, c(5,6), mean, na.rm=T)
+    varwr2mean <- apply(var2, c(5,6), mean, na.rm=T)
+    varwr3mean <- apply(var3, c(5,6), mean, na.rm=T)
+    varwr4mean <- apply(var4, c(5,6), mean, na.rm=T)
+    gc()
+
+    n.datos <- n.years.full * n.days.in.a.period(p,2001)
+
+    varwrBoth1 <- abind(varPeriod, var1, along = 1)
+    pvalue1 <- apply(varwrBoth1, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth1)
+    gc()
+
+    varwrBoth2 <- abind(varPeriod, var2, along = 1)
+    pvalue2 <- apply(varwrBoth2, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth2)
+    gc()
+
+    varwrBoth3 <- abind(varPeriod, var3, along = 1)
+    pvalue3 <- apply(varwrBoth3, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth3)
+    gc()
+
+    varwrBoth4 <- abind(varPeriod, var4, along = 1)
+    pvalue4 <- apply(varwrBoth4, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value)
+    rm(varwrBoth4)
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))
+
+    rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean)
+    gc()
+
+  }  # close p on period
+}  # close if on data type
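+# A small sketch (toy sizes, not the script's arrays) of the which(..., arr.ind=TRUE)
+# masking used above: each row of 'idx' gives the [day, year, member] coordinates of
+# one regime-1 day, which are then used to copy just those slices:
+#   clust <- array(sample(1:4, 10*3*2, replace = TRUE), c(10, 3, 2))  # [day, year, member]
+#   idx   <- which(clust == 1, arr.ind = TRUE)
+#   head(idx)  # one row per matching element, one column per dimension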
+
+
+
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  # (psleuFull needs n.sdates and n.leadtimes, defined above)
+  psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon))  # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+
+
+cat("Finished!\n")
+
+#library(TTR) +source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions +#workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" +#ean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr" +#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr" +rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance" + +forecast.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter" + +# available reanalysis for var data: +#ERAint <- '/esnas/reconstructions/ecmwf/eraint/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +NCEP.name <- "NCEP" + +rean <- JRA55 #JRA55 #ERAint #NCEP # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +#ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4 <- '/esarchive/old-files/exp_ecmwf_system4_m1/$STORE_FREQ$_mean/old/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' # only for daily tas S4 + +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 +lat.name <- "latitude" +lon.name <- "longitude" + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +forecast <- ECMWF_S4 + +# set it to 'rean' to load pressure fields and var from reanalisis, or to 'forecast' to load them from forecast systems: +fields <- rean #forecast #rean + +var.num <- 1 # Choose a variable. 
1: sfcWind 2: tas +var.name <- c("sfcWind","tas") # name of the 'predictand' variable of the chosen reanalysis +var.name.full <- c("wind Speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs +var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales) + +WR.period <- 1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn # For forecasts, it is the start month + +# the following 8 variables have to be the same as in the weather_regimes_vXX.R script that processed the psl data: +year.start <- 1981 #1979 #1981 #1982 #1981 # specify the first year of var data +year.end <- 2016 #2013 #2015 #2013 #2010 # specify the last year of var data + +# Only for seasonal forecasts: +startM <- 3 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 6 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +missing.forecasts=FALSE # set it to TRUE if there can be missing hindcasts files in the forecast var data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! + +############################################################################################################################# +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/psl/2014010200/1994_010200.nc')) +#ECMWF_monthly <- list(path = paste0('/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc')) + +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name))) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +# in case the script is run with no arguments: +if(length(script.arg) == 0 && fields.name == forecast.name){ + start.month <- startM + #WR.period <- start.month + lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis, so the variable to change is the period: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +# in case the script is run with 2 arguments, it is assumed you are using forecasts, so the variables to change are the start date and the lead time: +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month # it becomes just a copy of the start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + #workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} + +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +#if(fields.name == forecast.name) WR.period = 1 + +#if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: +# source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function +# workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +#} + +#if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +#cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) + +## days.period <- n.days.period <- period.length <- list() +## for (pp in 1:17){ +## days.period[[pp]] <- NA +## for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +## days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +## # number of days belonging to that period from year.start to year.end: +## n.days.period[[pp]] <- length(days.period[[pp]]) +## # Number of days belonging to that period in a single year: +## period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of bisestile years +## } + + +# Load var data (interpolating it to the same reanalysis grid, if necessary): +cat("Loading data. 
Please wait...\n") + +# Load var data: +if(fields.name == rean.name){ # Load daily var data in the reanalysis case: + info.period <- WR.period[1] # period used to get the variables n.years, lat.min, lat.max, lon.min y lon.max + WR.period.old <- WR.period + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[info.period],"_psl.RData")) + if(!identical(WR.period, WR.period.old)) WR.period <- WR.period.old + + n.years <- year.end - year.start + 1 + + #for (y in year.start:year.end){ + # var <- Load(var = var.name[var.num], exp = NULL, obs = list(fields), paste0(y,'0101'), storefreq = 'daily', leadtimemax = n.days.in.a.year(y), output = 'lonlat', + # latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + # vareuFull[seq.days.in.a.future.year(year.start, y),,] <- var$obs + # rm(var) + # gc() + #} + + vareuFull366 <- Load(var = var.name[var.num], exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + vareuFull <-array(NA,c(1,1,n.years,365, dim(vareuFull366)[5], dim(vareuFull366)[6])) + + # remove bisestile days to have all arrays with the same dimensions: + cat("Removing bisestile days. Please wait...\n") + vareuFull[,,,,,] <- vareuFull366[,,,1:365,,] + + for(y in year.start:year.end){ + y <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) vareuFull[,,y,60:365,,] <- vareuFull366[,,y,61:366,,] # take the march to december period removing the 29th of February + } + + rm(vareuFull366) + gc() + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + varPeriodClim <- apply(vareuFull, c(1,2,4,5,6), mean, na.rm=T) + + varPeriodClimLoess <- varPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=varPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + varPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(varPeriodClim) + gc() + + varPeriodClim2 <- InsertDim(varPeriodClimLoess, 3, n.years) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + vareuFull <- vareuFull - varPeriodClim2 + #rm(varPeriodClim2) + gc() + + for(p in WR.period){ + #p=1 # for the debug + + ## load regime data for chosen month: + varPeriod <- vareuFull[1,1,,pos.period(1,p),,] # select only var data during the chosen period + varPeriodClim <- varPeriodClim2[1,1,,pos.period(1,p),,] + varPeriodRel <- varPeriod / varPeriodClim + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + seasonal.data <- ifelse(length(my.cluster$cluster)/n.years > 33, TRUE, FALSE) # if there are more than 33 days, it means that we are loading sequences of 3 months + + if(seasonal.data == TRUE && p < 13){ # select only the days of the 3-months cluster series inside the target month p + n.days.period <- length(pos.month.extended(2001,p)) # total number of days in the three months + + days.month <- days.month.new <- days.month.full <- c() + days.month <- which(pos.month.extended(2001,p) == pos.month(2001,p)[1])-1 + 1:length(pos.month(2001,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + n.days.period*(y-1) + days.month.full <- c(days.month.full, days.month.new) + #print(days.month.new) + #print(days.month) + } + + cluster.sequence <- my.cluster$cluster[days.month.full] # select only the days of 
the cluster series inside the target month p + + } else { + cluster.sequence <- my.cluster$cluster # old syntax: my.cluster[[p]]$cluster + } + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + gc() + + cat("Formatting data. Please wait...\n") + var.melted <- melt(varPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + var.meltedRel <- melt(varPeriodRel[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Formatting data. Please wait......\n") + varmat <- unname(acast(var.melted, Year + Day ~ Lat ~ Lon)) + varmatRel <- unname(acast(var.meltedRel, Year + Day ~ Lat ~ Lon)) + + varwr1 <- varmat[wr1,,,drop=F] + varwr2 <- varmat[wr2,,,drop=F] + varwr3 <- varmat[wr3,,,drop=F] + varwr4 <- varmat[wr4,,,drop=F] + + varwr1Rel <- varmatRel[wr1,,,drop=F] + varwr2Rel <- varmatRel[wr2,,,drop=F] + varwr3Rel <- varmatRel[wr3,,,drop=F] + varwr4Rel <- varmatRel[wr4,,,drop=F] + + + varwr1mean <- apply(varwr1,c(2,3),mean,na.rm=T) + varwr2mean <- apply(varwr2,c(2,3),mean,na.rm=T) + varwr3mean <- apply(varwr3,c(2,3),mean,na.rm=T) + varwr4mean <- apply(varwr4,c(2,3),mean,na.rm=T) + + varwr1meanRel <- apply(varwr1Rel,c(2,3),mean,na.rm=T) + varwr2meanRel <- apply(varwr2Rel,c(2,3),mean,na.rm=T) + varwr3meanRel <- apply(varwr3Rel,c(2,3),mean,na.rm=T) + varwr4meanRel <- apply(varwr4Rel,c(2,3),mean,na.rm=T) + + n.datos <- n.years * n.days.in.a.period(p,2001) + + varwrBoth1 <- abind(varmat, varwr1, along = 1) + pvalue1 <- apply(varwrBoth1, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth1, varwr1) + gc() + + varwrBoth2 <- abind(varmat, varwr2, along = 1) + pvalue2 <- apply(varwrBoth2, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth2, varwr2) + gc() + + varwrBoth3 <- abind(varmat, varwr3, along = 1) + pvalue3 <- apply(varwrBoth3, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth3, varwr3) + gc() + + varwrBoth4 <- abind(varmat, varwr4, along = 1) + pvalue4 <- apply(varwrBoth4, c(2,3), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth4, varwr4) + + rm(varmat, varmatRel) + gc() + + #EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + #my.brks.var <- seq(-3,3,0.5) + #my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #PlotEquiMap2(rescale(varwr3mean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, continents.col="black") + + # save all the data necessary to redraw the graphs when we know the right regime: + save(varwr1mean, varwr2mean, varwr3mean,varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, varwr1meanRel, varwr2meanRel, varwr3meanRel,varwr4meanRel, file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".RData")) + rm(pvalue1,pvalue2,pvalue3,pvalue4) + rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean) + gc() + + } # close p on WR.period + +} + + + +if(fields.name == ECMWF_S4.name){ # for forecasts, we can load only the data for 1 month at time (since each month needs ~20 GB of memory to load all its leadtimes) + for(p in WR.period){ + #p=1 # for the debug + + start.month <- p + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else 
+    fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields)
+    fields3 <- gsub("\\$VAR_NAME\\$",var.name[var.num],fields2)
+
+    years <- c()
+
+    for(y in year.start:year.end){
+       fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3)
+
+       if(file.exists(fields4)){
+          # beware: this way it can only take into account leadtimes from 100 to 999, but it is the only way on the SMP machine:
+          #char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T))
+          #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead))
+          num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'time dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T)) # doesn't work well on the SMP
+          #num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T))
+
+          if(fields == forecast && forecast == ECMWF_S4 && var.name[var.num] == "sfcWind") { # the sfcWind daily data of S4 have a different format:
+             num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ',' -f1"), intern=T))
+          } else {
+             num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i '0: '", member.name,"', size' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+          }
+
+          #num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T))
+          num.lat1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'latitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+          num.lat2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lat dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+          num.lat <- ifelse(length(num.lat2) == 0, num.lat1, num.lat2)
+
+          #num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T))
+          num.lon1 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'longitude dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+          num.lon2 <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'lon dimension' | cut -d '=' -f2 | cut -d ' ' -f2"), intern=T))
+          num.lon <- ifelse(length(num.lon2) == 0, num.lon1, num.lon2)
+
+          num.var <- system(paste0("ncks -m ",fields4,"| grep -E -i '", var.name[var.num], "'"), intern=T)
+
+          # keep year y only if its file has all the expected dimensions:
+          if(length(num.lead) != 0 && length(num.memb) != 0 && length(num.lat) != 0 && length(num.lon) != 0 && length(num.var) != 0){
+             if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y)
+          }
+       }
+    }
+
+    n.years.full <- length(years) # years really available for that month
+    print(paste0(n.years.full, " available years for ", my.period[p]))
+
+    vareuFull <- Load(var = var.name[var.num], exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(lon=lon.name, lat=lat.name, member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod
+
+    # convert var to daily anomalies with the LOESS filter:
+    cat("Calculating anomalies. Please wait...\n")
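+    # Toy illustration of the LOESS filter applied below (a minimal runnable sketch,
+    # not part of the analysis): smooth a noisy annual cycle with the same span:
+    # set.seed(1)
+    # toy <- data.frame(day = 1:365, ens.mean = sin(2*pi*(1:365)/365) + rnorm(365, sd = 0.3))
+    # toy.smooth <- predict(loess(ens.mean ~ day, toy, span = 0.35))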
Please wait...\n") + varPeriodClim <- apply(vareuFull, c(1,4,5,6), mean, na.rm=T) + varPeriodClimLoess <- varPeriodClim + + n.pos.lat <- unname(dim(varPeriodClim)[3]) + n.pos.lon <- unname(dim(varPeriodClim)[4]) + + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=varPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + varPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(varPeriodClim) + gc() + + varPeriodClim2 <- InsertDim(InsertDim(varPeriodClimLoess, 2, n.years.full), 2, n.members) + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + vareuFull <- vareuFull - varPeriodClim2 + rm(varPeriodClim2) + gc() + + #if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) + #if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa + gc() + + ###### impact maps: + + load.month <- p + lead.month + if(load.month > 12) load.month <- load.month - 12 + + # select only var data during the chosen leadtime, and removing bisestile days! + varPeriod <- array(NA,c(dim(vareuFull)[1:3],length(pos.period(2001,load.month)),dim(vareuFull)[5:6])) + + i <- 1 + for(y in years){ + #y.mod <- y + #if((p + lead.month) > 12) y.mod <- y + 1 + #pos.per <- pos.period(y.mod, load.month) - ifelse(p == 1, 0, length(c(pos.months.before(y, p-1), pos.period(y, p-1)))) + + if(lead.month == 0) pos.per <- 1:ndm(p,y) + if(lead.month == 1) pos.per <- ndm(p,y) + 1:ndm(p + 1,y) + if(lead.month == 2) pos.per <- ndm(p,y) + ndm(p + 1,y) + 1:ndm(p + 2,y) + if(lead.month == 3) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + 1:ndm(p + 3,y) + if(lead.month == 4) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + ndm(p + 3,y) + 1:ndm(p + 4,y) + if(lead.month == 5) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + ndm(p + 3,y) + ndm(p + 4,y) + 1:ndm(p + 5,y) + if(lead.month == 6) pos.per <- ndm(p,y) + ndm(p + 1,y) + ndm(p + 2,y) + ndm(p + 3,y) + ndm(p + 4,y) + ndm(p + 5,y) + 1:ndm(p + 6,y) + + if(length(pos.per) == 29) { + #febmod = length(pos.period(y.mod,load.month)) + varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per[-29],,] + } else { + varPeriod[1,,i,,,] <- vareuFull[1,,i,pos.per,,] + } + + i <- i+1 + } + + gc() + + years.var <- years + load(file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) # load the cluster sequence for a chosen startdate and leadtime + #cluster.sequence <- my.cluster[[p]]$cluster #as.vector(my.cluster.array[[p]]) # no bisestile days are left, only the years can be different! + if(length(years.var) < length(years)) { + print("Warning: psl years are more than var years. 
Removing years not in common..") + ss <- which(is.na(match(years,years.var))) # remove psl years not present in var data + my.cluster.array2 <- my.cluster.array[[p]][,-ss,] # (no bisestile days are left in my.cluster.array) + } else { + my.cluster.array2 <- my.cluster.array[[p]] # (no bisestile days are left in my.cluster.array) + } + + wr1 <- which(my.cluster.array2 == 1, arr.ind=T) + wr2 <- which(my.cluster.array2 == 2, arr.ind=T) + wr3 <- which(my.cluster.array2 == 3, arr.ind=T) + wr4 <- which(my.cluster.array2 == 4, arr.ind=T) + + var1 <- var2 <- var3 <- var4 <- array(NA,c(dim(varPeriod))) + + for (i in 1:dim(wr1)[1]) var1[1, wr1[i,3], wr1[i,2], wr1[i,1],,] <- varPeriod[1, wr1[i,3], wr1[i,2], wr1[i,1],,] + for (i in 1:dim(wr2)[1]) var2[1, wr2[i,3], wr2[i,2], wr2[i,1],,] <- varPeriod[1, wr2[i,3], wr2[i,2], wr2[i,1],,] + for (i in 1:dim(wr3)[1]) var3[1, wr3[i,3], wr3[i,2], wr3[i,1],,] <- varPeriod[1, wr3[i,3], wr3[i,2], wr3[i,1],,] + for (i in 1:dim(wr4)[1]) var4[1, wr4[i,3], wr4[i,2], wr4[i,1],,] <- varPeriod[1, wr4[i,3], wr4[i,2], wr4[i,1],,] + + varwr1mean <- apply(var1, c(5,6), mean, na.rm=T) + varwr2mean <- apply(var2, c(5,6), mean, na.rm=T) + varwr3mean <- apply(var3, c(5,6), mean, na.rm=T) + varwr4mean <- apply(var4, c(5,6), mean, na.rm=T) + gc() + + n.datos <- n.years.full * n.days.in.a.period(p,2001) + + varwrBoth1 <- abind(varPeriod, var1, along = 1) + pvalue1 <- apply(varwrBoth1, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth1) + gc() + + varwrBoth2 <- abind(varPeriod, var2, along = 1) + pvalue2 <- apply(varwrBoth2, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth2) + gc() + + varwrBoth3 <- abind(varPeriod, var3, along = 1) + pvalue3 <- apply(varwrBoth3, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth3) + gc() + + varwrBoth4 <- abind(varPeriod, var4, along = 1) + pvalue4 <- apply(varwrBoth4, c(5,6), function(x) t.test(x[1:n.datos],x[(n.datos+1):length(x)])$p.value) + rm(varwrBoth4) + + # save all the data necessary to redraw the graphs when we know the right regime: + save(varwr1mean, varwr2mean, varwr3mean, varwr4mean, pvalue1, pvalue2, pvalue3, pvalue4, file=paste0(forecast.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) + + rm(varwr1mean,varwr2mean,varwr3mean,varwr4mean) + gc() + + } # close p on period +} # close if on data type + + + + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... 
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+       var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+
+       mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) # daily mean of the four 6-hourly values
+       rm(var)
+       gc()
+
+       psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+       rm(mean.lday)
+       gc()
+    }
+  }
+}
+
+
+
+cat("Finished!\n")
+
diff --git a/weather_regimes_maps_v29.R b/weather_regimes_maps_v29.R
new file mode 100644
index 0000000000000000000000000000000000000000..15df79d8af301d4177c2fe412bbd0ca85c062d52
--- /dev/null
+++ b/weather_regimes_maps_v29.R
@@ -0,0 +1,5823 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (e.g. wind speed or temperature)
+#
+# I/O: the input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact maps too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# Note that this script doesn't need to be parallelized, because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: you need to have created beforehand the '_psl.RData' files which are the output of 'weather_regimes.R', for each period you want to visualize.
+# If your regimes derive from a reanalysis, this script must be run twice:
+# first, only one month/season at a time, with: 'period=X' (X=1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"',
+# inserting the regime names 'clusterX.name <- ...' in the correct order; in this first run, you only save the ordered cartography.
+# You already have to know which is the right regime order by taking a look at the output maps (_clusterX.png) of weather_regimes.R
+# After that, any time you run this script, remember to set: 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot),
+# so as not to overwrite the files which store the right cluster order.
+# You can check whether the monthly regimes have been associated correctly by setting composition <- "psl.rean"
+#
+# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# Then, you have to run this script only to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
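+# Example settings for the two passes described above (a sketch based on this header):
+#   first run:  period <- 1; composition <- "none"; ordering <- TRUE; save.names <- TRUE; as.pdf <- FALSE
+#   later runs: ordering <- TRUE; save.names <- FALSE; composition <- "edpr" (or any other composition)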
+#
+# If your regimes derive from a forecast system, you have to compute the regimes derived from a reanalysis first. Then, you can associate the reanalysis
+# to the forecast system, employing it to automatically associate to each simulated cluster one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+#library("corrplot")
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/57_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_ordered4variance"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/58_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days_ordered4variance"
+
+rean.name <- "JRA-55" #"JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if input data comes from a reanalysis)
+
+forecast.name <- "ECMWF-S4" # forecast system name (if input data comes from a forecast system)
+
+fields.name <- rean.name #forecast.name # choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+composition <- "edpr" # choose which kind of composition you want to plot:
+                      # 'none' doesn't plot anything; it only associates the clusters to the regimes with the manual association in the rows below,
+                      #    and saves them in the ClusterName.RData files for each period selected, overwriting any pre-existing files.
+                      # 'variance': as 'none', but before saving ClusterName.RData, it reorders the clusters in decreasing order of explained variance,
+                      #    so that the orden[1] regime corresponds to the cluster with the highest variance, orden[2] to the second, and so on.
+                      #    In this way, you can show all the compositions below in order of variance, instead of ordering them manually one period at a time.
+                      # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+                      # 'psl' for all the regime anomalies for a fixed forecast month
+                      # 'fre' for all the interannual frequencies for a fixed forecast month
+                      # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+                      # 'summary' for the summary plots of all forecast months (persistence bias, frequency bias, correlations, etc.) [you need all the ClusterNames files]
+                      # 'impact.highest' for all the impact plots of the regime with the highest impact
+                      # 'single.impact' to save the four impact maps in a 2x2 composition
+                      # 'single.psl' to save the individual psl map
+                      # 'single.fre' to save the individual fre map
+                      # 'taylor' plots the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+                      # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps
+                      # 'psl.rean': plots all the regime anomalies and the correlation matrix of all months of a reanalysis with the DJF regime anomalies of the same reanalysis
+                      # 'psl.rean.unordered': as before, but without ordering the regimes with the same order as in vector 'orden'
+
+var.num <- 1 # if fields.name = 'rean.name' and composition = 'simple' or 'edpr', choose a variable for the impact maps. 1: sfcWind 2: tas
+
+# Choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12
+
+# Manually associate the four clusters to the four regimes, one period at a time (only in case composition="none"):
+cluster4.name <- "NAO+"
+cluster1.name <- "NAO-"
+cluster2.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4"
+                   # (when composition='edpr' or monthly_anomalies = TRUE)
+
+####### if monthly_anomalies <- TRUE, you have to specify these additional parameters:
+monthly_anomalies <- FALSE # if TRUE, also save maps with the monthly wind speed and slp anomalies over Europe for the chosen years and months set below
+year.chosen <- 2016
+month.chosen <- 1:12
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.data <- JRA55 # choose one of the above reanalyses
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with the '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a leadtime (0 = same month as the start month) to plot
+                      # (and the variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl', 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition;
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it would modify the data in the ClusterName.RData files!!!
+#for(forecast.month in 1:12){
+
+####### Derived variables ###############################################################################################################################################
+
+ordering <- TRUE # if TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you manually inserted the names below).
+save.names <- FALSE # if TRUE, also save the cluster names (i.e. the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- FALSE # FALSE: save results as one .png file for each season/month; TRUE: save only one .pdf file with all seasons/months
+mean.freq <- TRUE # if TRUE, when composition <- "simple" or 'edpr', it also adds the mean frequency value over each frequency plot
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+if(no.regimes) { regime.title <- paste0("Cluster",1:4) } else { regime.title <- orden}
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+var.num.orig <- var.num # when loading _psl files, this value can change!
+fields.name.orig <- fields.name
+period.orig <- period
+work.dir.orig <- work.dir
+pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods")
+
+n.map <- 0
+
+############################################################################################
+##                                  Start analysis                                        ##
+############################################################################################
+
+if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){
+
+  if(composition == "psl" || composition == "fre" || composition == "impact") {
+     if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60)
+     if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300)
+
+     lead.months <- c(6:0,0)
+     plot.new()
+
+     # Sheet title:
+     par(fig=c(0, 1, 0.94, 0.98), new=TRUE)
+     if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2)
+     if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2)
+     if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2)
+
+  } # close if on composition == "psl" ...
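+  # All the sheet compositions in this script are built by drawing each panel into a
+  # normalised sub-region of the same device with par(fig=...). A minimal standalone
+  # sketch of the technique (not part of the script):
+  # png("layout_demo.png", width=800, height=600)
+  # plot.new()
+  # par(fig=c(0, 1, 0.9, 1.0), new=TRUE); mtext("Sheet title", cex=2, font=2)
+  # par(fig=c(0, 0.5, 0, 0.9), new=TRUE); plot(1:10)
+  # par(fig=c(0.5, 1, 0, 0.9), new=TRUE); plot(10:1)
+  # dev.off()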
+ + if(composition == "none") { + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + } + + if(composition == "variance"){ + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + period <- 1:12 + } + + if(composition == "psl.rean") { + ordering <- TRUE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering <- FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + ## if(composition == "corr.matrix") { + ## ordering <- FALSE # set it to TRUE if you want to see the correlation matrix of the ordered clusters instead!!! + ## period <- c(9:12, 1:8) # to start from September instead of January + ## png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + ## plot.new() + ## } + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! + + for(lead.month in lead.months){ + #lead.month=lead.months[1] # for the debug + + if(composition == "simple" || composition == 'edpr'){ + if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60) + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60) + } + + if(composition == 'psl' || composition == 'fre' || composition == 'impact') { + period <- forecast.month - lead.month + if(period < 1) period <- period + 12 + } + + + for(p in period){ + #p <- period[1] # for the debug + p.orig <- p + + if(fields.name == rean.name) print(paste("p =",p)) + if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month)) + + # load regime data (for forecasts, it load only the data corresponding to the chosen start month p and lead month 'lead.month') + impact.data <- FALSE + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) { + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + # load impact data only if it is available, if not it will leave an empty space in the composition: + if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){ + impact.data <- TRUE + print(paste0("Impact data for variable ",var.name[var.num] ," available for reanalysis ", rean.name)) + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + } else { + impact.data <- FALSE + print(paste0("Impact data for variable ",var.name[var.num] ," not available for reanalysis ", rean.name)) + } + + if(composition == "variance"){ + my.cluster2 <- my.cluster # create a copy of my.cluster + + ss1 <- which(my.cluster$cluster == 1) + ss2 <- which(my.cluster$cluster == 2) + ss3 <- which(my.cluster$cluster == 3) + ss4 <- which(my.cluster$cluster == 4) + + 
+            withinss <- my.cluster$withinss
+            max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # first cluster with maximum variance
+            withinss[max1] <- NA
+
+            max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # second cluster with maximum variance
+            withinss[max2] <- NA
+
+            max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # third cluster with maximum variance
+            withinss[max3] <- NA
+
+            max4 <- which(!is.na(withinss))
+            rm(withinss)
+
+            # vector whose first element tells you which is the cluster with the maximum variance, whose second element shows which is the cluster
+            # with the second maximum variance, and so on:
+            max.seq <- c(max1, max2, max3, max4)
+
+            assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+)
+            assign(paste0("cluster",max2,".name"), orden[2])
+            assign(paste0("cluster",max3,".name"), orden[3])
+            assign(paste0("cluster",max4,".name"), orden[4])
+
+         }
+      }
+
+      if(fields.name == forecast.name && n.map < 7){
+         load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load the cluster time series and mean psl data
+
+         if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+            impact.data <- TRUE
+            print("Impact data available for forecasts")
+            if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load the mean var data for the impact maps
+         } else {
+            impact.data <- FALSE
+            print("Impact data for forecasts not available")
+         }
+
+         if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+         if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+      if(p != p.orig) p <- p.orig
+
+      if(fields.name == forecast.name && n.map < 7){
+
+         # compute the simulated interannual monthly variability:
+         n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecasted month
+
+         freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+         freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+         freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+         freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+         sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+         sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+         sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+         sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
+         # compute the transition probabilities:
+         j1 <- j2 <- j3 <- j4 <- 1
+         transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+         n.members <- dim(my.cluster.array[[p]])[3]
+
+         for(m in 1:n.members){
+           for(y in 1:n.years){
+             for (d in 1:(n.days.month-1)){
+
+                if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){
+                   transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+                   j1 <- j1 + 1
+                }
+                if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){
+                   transition2[j2] <- 
my.cluster.array[[p]][d + 1,y,m] + j2 <- j2 + 1 + } + if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){ + transition3[j3] <- my.cluster.array[[p]][d + 1,y,m] + j3 <- j3 +1 + } + if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){ + transition4[j4] <- my.cluster.array[[p]][d + 1,y,m] + j4 <- j4 +1 + } + + } + } + } + + trans.sim <- matrix(NA,4,4 , dimnames= list(c("cluster1.sim", "cluster2.sim", "cluster3.sim", "cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"))) + trans.sim[1,2] <- length(which(transition1 == 2)) + trans.sim[1,3] <- length(which(transition1 == 3)) + trans.sim[1,4] <- length(which(transition1 == 4)) + + trans.sim[2,1] <- length(which(transition2 == 1)) + trans.sim[2,3] <- length(which(transition2 == 3)) + trans.sim[2,4] <- length(which(transition2 == 4)) + + trans.sim[3,1] <- length(which(transition3 == 1)) + trans.sim[3,2] <- length(which(transition3 == 2)) + trans.sim[3,4] <- length(which(transition3 == 4)) + + trans.sim[4,1] <- length(which(transition4 == 1)) + trans.sim[4,2] <- length(which(transition4 == 2)) + trans.sim[4,3] <- length(which(transition4 == 3)) + + trans.tot <- apply(trans.sim,1,sum,na.rm=T) + for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i] + + + # compute cluster persistence: + my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1 , day2month1member1, ... , day1month2member1, day2month2member1, ,,, + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + n.dd <- length(my.cluster.vector) + + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1 + if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1 + + if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1 + if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1 + + if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1 + if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1 + + if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1 + if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1 + } + + if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1 + + pers1 <- mean(sequ1[which(sequ1 != 0)]) + pers2 <- mean(sequ2[which(sequ2 != 0)]) + pers3 <- mean(sequ3[which(sequ3 != 0)]) + pers4 <- mean(sequ4[which(sequ4 != 0)]) + + # compute correlations between simulated clusters and observed regime's anomalies: + cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean + + if(composition == 'impact' && impact.data == TRUE) { + cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean + #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4 + } + + lat.forecast <- lat; lon.forecast 
<- lon
+
+         if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+         load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load the reanalysis data, which overwrites the pslwrXmean variables
+         load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+         # load the reanalysis varwrXmean impact data:
+         if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+         if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+         if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+         if(!identical(period.orig,period)) period <- period.orig
+         if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+         if(p.orig != p) p <- p.orig
+
+         # compute the observed transition probabilities:
+         n.days.month <- n.days.in.a.period(load.month,2001)
+         j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+         for (d in 1:(length(my.cluster$cluster)-1)){
+            if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+               transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+               j1o <- j1o + 1
+            }
+            if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+               transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+               j2o <- j2o + 1
+            }
+            if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+               transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+               j3o <- j3o + 1
+            }
+            if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+               transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+               j4o <- j4o + 1
+            }
+         }
+
+         trans.obs <- matrix(NA,4,4, dimnames=list(c("cluster1.obs", "cluster2.obs", "cluster3.obs", "cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+         trans.obs[1,2] <- length(which(transition.obs1 == 2))
+         trans.obs[1,3] <- length(which(transition.obs1 == 3))
+         trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+         trans.obs[2,1] <- length(which(transition.obs2 == 1))
+         trans.obs[2,3] <- length(which(transition.obs2 == 3))
+         trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+         trans.obs[3,1] <- length(which(transition.obs3 == 1))
+         trans.obs[3,2] <- length(which(transition.obs3 == 2))
+         trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+         trans.obs[4,1] <- length(which(transition.obs4 == 1))
+         trans.obs[4,2] <- length(which(transition.obs4 == 2))
+         trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+         trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+         for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+         # compute the observed interannual monthly variability:
+         freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+         for(y in year.start:year.end){
+            # for both the reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+            freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+            freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+            freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+            freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+         }
+
+         sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+         sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+         sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+         sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+         wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+         regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+         if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+         if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+         if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+         if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+         cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+         cluster.corr[1,1] <- cor(as.vector(cluster1.sim), as.vector(pslwr1mean))
+         cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean))
+         cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean))
+         cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean))
+         cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean))
+         cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean))
+         cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean))
+         cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean))
+         cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean))
+         cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean))
+         cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean))
+         cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean))
+         cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean))
+         cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean))
+         cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean))
+         cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean))
+
+         # associate each simulated cluster to one observed regime:
+         max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T)
+         cluster.corr2 <- cluster.corr
+         cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore
+         cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore
+         max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T)
+         cluster.corr3 <- cluster.corr2
+         cluster.corr3[max2[1],] <- -999
+         cluster.corr3[,max2[2]] <- -999
+         max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T)
+         cluster.corr4 <- cluster.corr3
+         cluster.corr4[max3[1],] <- -999
+         cluster.corr4[,max3[2]] <- -999
+         max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T)
+
+         seq.max1 <- c(max1[1],max2[1],max3[1],max4[1])
+         seq.max2 <- c(max1[2],max2[2],max3[2],max4[2])
+
+         cluster1.regime <- seq.max2[which(seq.max1 == 1)]
+         cluster2.regime <- seq.max2[which(seq.max1 == 2)]
+         cluster3.regime <- seq.max2[which(seq.max1 == 3)]
+         cluster4.regime <- seq.max2[which(seq.max1 == 4)]
+
+         cluster1.name <- regimes.obs[cluster1.regime]
+         cluster2.name <- regimes.obs[cluster2.regime]
+         cluster3.name <- regimes.obs[cluster3.regime]
+         cluster4.name <- regimes.obs[cluster4.regime]
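+         # Compact equivalent of the greedy matching above (sketch): repeatedly take
+         # the global maximum of the correlation matrix and mask its row and column:
+         # cc <- cluster.corr; assoc <- integer(4)
+         # for(k in 1:4){
+         #    m <- which(cc == max(cc), arr.ind=TRUE)[1,]
+         #    assoc[m[1]] <- m[2]          # simulated cluster m[1] -> observed regime m[2]
+         #    cc[m[1],] <- -Inf; cc[,m[2]] <- -Inf
+         # }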
+
+         regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+         cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs))
+
+         spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]]
+         spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]]
+         spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]]
+         spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]]
+
+         # measure the persistence of the reanalysis dataset:
+         my.cluster.vector <- my.cluster[[1]]
+
+         sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+         i1 <- i2 <- i3 <- i4 <- 1
+
+         if(my.cluster.vector[1] != 1) i1 <- 0
+         if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+         if(my.cluster.vector[1] != 2) i2 <- 0
+         if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+         if(my.cluster.vector[1] != 3) i3 <- 0
+         if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+         if(my.cluster.vector[1] != 4) i4 <- 0
+         if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+
+         n.dd <- length(my.cluster.vector)
+         for(d in 1:(n.dd-1)){
+            if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+            if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+            if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+            if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+            if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+            if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+            if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+            if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+         }
+
+         if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+         if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+         if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+         if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+         persObs1 <- mean(sequ1[which(sequ1 != 0)])
+         persObs2 <- mean(sequ2[which(sequ2 != 0)])
+         persObs3 <- mean(sequ3[which(sequ3 != 0)])
+         persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+         ### insert here all the manual corrections to the regime assignment due to misassignments of the automatic selection:
+         ### forecast month: September
+         #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+         #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+         #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+         #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+         if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+         if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+         if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+         if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+         if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+         if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+         if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+         # regimes.sim # for the debug
+
+         ### forecast month: October
+         #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+         #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+         #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+         #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+         #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+         if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+         if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+         if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+         if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+         if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+
+         ### forecast month: November
+         #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+         #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+         #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+         #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"}
+         #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+
+         regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # simulated regimes after the manual corrections
+         #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as a reference
+
+         order.sim <- match(regimes.sim2, orden)
+         order.obs <- match(regimes.obs, orden)
+
+         clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim)
+         clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean)
+
+         # recompute the spatial correlations, since they may have changed after the manual corrections above:
+         spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]]))
+         spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]]))
+         spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]]))
+         spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]]))
+
+         if(composition == 'impact' && impact.data == TRUE) {
+            clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp)
+            #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv)
+
+            clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean)
+            #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4)
+
+            cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]]))
+            cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]]))
+            cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]]))
+            cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]]))
+
+         }
+
+         load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload the forecast cluster time series and mean psl data
+         load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload the mean var data for the impact maps
+
+         if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+         if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+         if(!identical(period.orig, period)) period <- period.orig
+         if(p.orig != p) p <- p.orig
+         if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # restore the right lat order
+         if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+      } # close if on 'forecast.name'
+
+      if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){
+         if(save.names){
+            save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData"))
+
+         } else { # in this case, we load the cluster names from the file already saved, if the regimes come from a reanalysis:
+            ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")
+            if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check whether the file exists or not
+            load(ClusterName.file) # load the cluster names
+
+            if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+            if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+         }
+      }
+
+      # assign to each cluster its regime:
+      #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){
+      if(ordering == FALSE){
+         cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc.
+ } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + if(fields.name == rean.name) { + assign(paste0("impRel",cluster1), varwr1meanRel) + assign(paste0("impRel",cluster2), varwr2meanRel) + assign(paste0("impRel",cluster3), varwr3meanRel) + assign(paste0("impRel",cluster4), varwr4meanRel) + } + + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + 
assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" # add white in the middle + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); 
freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and ",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- 
read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt")
+ ## mes <- p
+ ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the observed frequency of the NAO
+ ## #plot(1981:2015,fre.obs,type="l")
+ ## #lines(1981:2015,fre1,type="l",col="red")
+ ## #lines(1981:2015,fre2,type="l",col="blue")
+ ## #lines(1981:2015,fre4,type="l",col="green")
+ ## cor1 <- cor(fre.obs, fre1)
+ ## cor2 <- cor(fre.obs, fre2)
+ ## cor3 <- cor(fre.obs, fre3)
+ ## cor4 <- cor(fre.obs, fre4)
+ ## round(c(cor1,cor2,cor3,cor4),2)
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.30
+ legend1.cex <- 2
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)}
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE)
+ ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)}
+
+ # x-axis 'year' labels of the frequency plots:
+ map.title.xpos <- 0.76
+ map.title.width <- 0.2
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.728, 0.729), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE)
+ mtext("year", cex=3)
+ par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE)
+ mtext("year", cex=3)
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.02
+ title1.width <- 0.4
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE)
+ mtext(paste0(regime.title[1],": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE)
+ mtext(paste0(regime.title[2],": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE)
+ mtext(paste0(regime.title[3],": ", psl.name, " Anomaly"), font=2, cex=4)
+ par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE)
+ mtext(paste0(regime.title[4],": ", psl.name, " Anomaly"), font=2, cex=4)
+
+ # Impact maps:
+ if(impact.data == TRUE ){
+ impact.xpos <- 0.47
+ impact.width <- 0.26
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black")
+ par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU],
lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime.title[1], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime.title[2], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime.title[3], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime.title[4], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, 
cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime.title[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime.title[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime.title[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime.title[4], " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), 
new=TRUE)
+ sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2)
+ mtext(paste0("r= ",sp.cor3), cex=4.5)
+ par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE)
+ sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2)
+ mtext(paste0("r= ",sp.cor4), cex=4.5)
+ }
+
+ if(!as.pdf) dev.off() # when saving individual png files (instead of a single pdf)
+
+ } # close if on: composition == 'simple'
+
+
+ if(composition == "edpr"){
+
+ ## adjust the color legends to include triangles at the extremities, increasing the number of intervals by two:
+ my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20)
+ my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--red colors
+
+ ## same but for SLP:
+ my.brks <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100)
+ my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors
+ my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" ## add white in the middle
+
+ fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png")
+
+ png(filename=fileoutput,width=3000,height=3700)
+
+ plot.new()
+
+ y1 <- 0.10
+ y3 <- 0.315
+ y5 <- 0.53
+ y7 <- 0.745
+ y.width <- 0.18
+
+ y2 <- y1 + y.width; y4 <- y3 + y.width; y6 <- y5 + y.width; y8 <- y7 + y.width
+ yt1 <- y2+0.003; yt3 <- y4+0.003; yt5 <- y6+0.003; yt7 <- y8+0.003
+ yt2 <- yt1 + 0.004; yt4 <- yt3 + 0.005; yt6 <- yt5 + 0.005; yt8 <- yt7 + 0.005
+
+ ## Centroid maps:
+ map.xpos <- 0.27
+ map.width <- 0.46
+ par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE)
+ PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE)
+ PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE)
+ PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+ par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE)
+ PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black")
+
+ # Legend centroid maps:
+ legend1.xpos <- 0.30
+ legend1.width <- 0.40
+ legend1.cex <- 3
+ my.subset <- match(values.to.plot, my.brks)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.045, 0.085), new=TRUE)
+ ##ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset)
+ # note: l() is presumably a shorthand for length() defined in the sourced utility functions
+ ColorBar(my.brks[2:(l(my.brks)-1)], cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, label_scale=legend1.cex, bar_limits=c(my.brks[2], my.brks[l(my.brks)-1]), col_inf=my.cols[1], col_sup=my.cols[l(my.cols)], subsample=1)
+
+ if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} ## las=1 displays the unit horizontally instead of vertically
+
+ # Title Centroid Maps:
+ title1.xpos <- 0.3
+ title1.width <- 0.44
+ par(fig=c(title1.xpos, title1.xpos + title1.width, yt7+0.0025,
yt7+0.0075), new=TRUE) + mtext(paste0(regime.title[1]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt5+0.0025, yt5+0.0075), new=TRUE) + mtext(paste0(regime.title[2]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt3+0.0025, yt3+0.0075), new=TRUE) + mtext(paste0(regime.title[3]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt1+0.0025, yt1+0.0075), new=TRUE) + mtext(paste0(regime.title[4]," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, y7, y8), new=TRUE) + PlotEquiMap2(impRel1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y5, y6), new=TRUE) + PlotEquiMap2(impRel2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y3, y4), new=TRUE) + PlotEquiMap2(impRel3[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y1, y2), new=TRUE) + PlotEquiMap2(impRel4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 3 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.045, 0.085), new=TRUE) + + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) + ##mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex, las=1) + mtext(side=4,"%", cex=legend2.cex, las=1) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y7, y8-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, 
freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y5, y6-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y3, y4-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y1, y2-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
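+ ## --- Aside: self-contained toy sketch (commented out) of the statistic shown
+ ## --- on these frequency panels: yearly frequencies with missing start dates
+ ## --- padded with NA, then correlated with use="complete.obs" so the NA years
+ ## --- are skipped; all names below are hypothetical stand-ins:
+ ## set.seed(1)
+ ## freq.sim <- runif(35); freq.sim[c(3, 17)] <- NA             # two missing years
+ ## freq.obs <- replace(freq.sim + rnorm(35, sd=0.1), is.na(freq.sim), 0.5)
+ ## round(cor(freq.sim, freq.obs, use="complete.obs"), 2)       # the plotted r
+ ## --- end aside ---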
+ par(fig=c(title3.xpos, title3.xpos + title3.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " Frequency"), font=2, cex=4) + + ## % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y4+0.425, y4+0.425+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.39, y3+0.39+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y2+0.21, y2+0.21+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.17, y1+0.17+0.01), new=TRUE) + mtext("%", cex=3.3) + + ## mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y7+0.163, y7+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y5+0.163, y5+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.165, y3+0.165+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.163, y1+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + ## Subtitle frequency maps: + map.title.xpos <- 0.96 + map.title.width <- 0.04 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y7-0.012, y7-0.012+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y5-0.012, y5-0.012+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y3-0.012, y3-0.012+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y1-0.012, y1-0.012+0.001), new=TRUE) + mtext("year", cex=3) + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -m 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + system(paste0("rm ", fileoutput)) + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, 
cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean" || composition == "psl.rean.unordered"){ + ## matrix correlation with DJF regimes: + #cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean + #cluster1.name.monthly <- cluster1.name; cluster2.name.monthly <- cluster2.name; cluster3.name.monthly <- cluster3.name; cluster4.name.monthly <- cluster4.name + assign(paste0("clusterMax", which(orden == cluster1.name), ".monthly"), pslwr1mean) + assign(paste0("clusterMax", which(orden == cluster2.name), ".monthly"), pslwr2mean) + assign(paste0("clusterMax", which(orden == cluster3.name), ".monthly"), pslwr3mean) + assign(paste0("clusterMax", which(orden == cluster4.name), ".monthly"), pslwr4mean) + + ## cluster.name.monthly <- c(cluster1.name.monthly, cluster2.name.monthly, cluster3.name.monthly, cluster4.name.monthly) + ## max1 <- which(cluster.name.monthly == orden[1]) # get which is the monthly cluster with the highest explained variance (by default it is associated to NAO+) + ## max2 <- which(cluster.name.monthly == orden[2]) # get the monthly cluster with the second highest variance + ## max3 <- which(cluster.name.monthly == orden[3]) # ... + ## max4 <- which(cluster.name.monthly == orden[4]) # ... 
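+ ## --- Aside: hedged toy sketch (commented out) of the pattern matching built
+ ## --- below: correlate every monthly centroid with every seasonal centroid and
+ ## --- label each monthly cluster with its best-matching regime; the toy 10x20
+ ## --- fields below are hypothetical stand-ins for the real centroids:
+ ## set.seed(2)
+ ## monthly  <- replicate(4, matrix(rnorm(200), 10, 20), simplify = FALSE)
+ ## seasonal <- replicate(4, matrix(rnorm(200), 10, 20), simplify = FALSE)
+ ## cc <- sapply(seasonal, function(s) sapply(monthly, function(m) cor(as.vector(m), as.vector(s))))
+ ## apply(cc, 1, which.max)   # best seasonal regime for each monthly cluster
+ ## --- end aside ---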
+
+ ## max.seq <- c(max1, max2, max3, max4)
+
+ ## Load DJF data:
+ rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+
+ load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # load mean SLP DJF data from the same reanalysis
+ load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also the reanalysis DJF regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name) # clusters for DJF!
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("psl.ordered",cluster1), pslwr1mean) # psl for DJF
+ assign(paste0("psl.ordered",cluster2), pslwr2mean)
+ assign(paste0("psl.ordered",cluster3), pslwr3mean)
+ assign(paste0("psl.ordered",cluster4), pslwr4mean)
+
+ # pattern correlation between each monthly centroid and each ordered DJF centroid:
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden))
+ for(i in 1:4) {
+ for(j in 1:4) {
+ cluster.corr[i,j] <- cor(as.vector(get(paste0("clusterMax",i,".monthly"))), as.vector(get(paste0("psl.ordered",j))))
+ }
+ }
+
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.08
+ map.xpos <- map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+ print(paste0("map.xpos= ", map.xpos))
+
+ par(fig=c(0, 1, 0, 1), new=TRUE) # note: 'fig=' is needed here; the original 'fig <-' assigned a global variable instead of setting the parameter
+ # reset par to its default values, because drawing with PlotEquiMap() alters some par values:
+ if(n.map == 1) { op <- par(no.readonly = TRUE) } else { par(op) }
+
+ text.cex <- 2
+ text.ypos <- 1.03
+ text.xmod <- 0.007 * (n.map - 1)
+ text.xpos <- map.xpos + text.xmod - 0.02
+ text.width <- 0.015
+ text(x=text.xpos - text.width, y=text.ypos - 0.02, labels="cl1", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos - 0.04, labels="cl2", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos - 0.06, labels="cl3", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos -
0.08, labels="cl4", cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos + 0.00, labels="NAO+", cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos + 0.00, labels="NAO-", cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos + 0.00, labels="BLO", cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos + 0.00, labels="ATL", cex=text.cex) + + text(x=text.xpos + 0*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,1], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,2], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,3], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,4], cex=text.cex) + + ## Centroid maps: + ## (note that mapX == clusterMaxX.monthly, X = 1, ..., 4 by default) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, 
contours.lty="F1FF1F")
+
+ } # close if on psl.rean or on psl.rean.unordered
+
+
+ # Visualize the composition of the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else {
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'impact'
+
+ # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month:
+ if(composition == "single.impact"){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000)
+
+ plot.new()
+
+ par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE)
+ mtext("NAO+",cex=5)
+ par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE)
+ PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE)
+ mtext("NAO-",cex=5)
+ par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE)
+ PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE)
+ mtext("Blocking",cex=5)
+ par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE)
+ PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE)
+ mtext("Atlantic Ridge",cex=5)
+ par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE)
+ PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE)
+ ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3)
+
+ par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE)
+ mtext("%",cex=3)
+
+ dev.off()
+
+ # format it manually from the bash shell (you must run it from your workstation until the 'identity' command of ImageMagick becomes available on the cluster):
+ #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png
+
+
+ ### to plot the impact map of all regimes on a particular month:
+ #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100
+ year.test <- 2016
+ pos.year.test <- year.test - year.start + 1
+ imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test]
+ #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032)
+ par(fig=c(0, 1, 0.05, 1), new=TRUE)
+ PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ # vector with the frequency of the WRs in the chosen month and year:
+ wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test])
+
+ ## # or save them as individual maps:
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
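+ ## --- Aside: toy sketch (commented out) of the frequency-weighted composite
+ ## --- computed above for imp.test: the monthly impact is the sum of the regime
+ ## --- impact maps weighted by that month's regime frequencies; 'imp.k' and
+ ## --- 'freq.k' are hypothetical stand-ins:
+ ## imp.k  <- replicate(4, matrix(rnorm(100), 10, 10), simplify = FALSE)
+ ## freq.k <- c(0.032, 0.387, 0.516, 0.064)   # frequencies of the four regimes
+ ## imp.composite <- Reduce(`+`, Map(`*`, imp.k, freq.k))
+ ## --- end aside ---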
+ ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+ }
+
+ # Visualize each regime's pressure pattern as a single map for a selected month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2)))
+ dev.off()
+
+ # Legend centroid maps:
+ #my.subset <- match(values.to.plot, my.brks)
+ #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ #mtext(side=4," hPa", cex=legend1.cex)
+
+ }
+
+ # Visualize the composition with the interannual frequencies for a selected forecasted month:
+ if(composition == "fre"){
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
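+ ## --- Aside: minimal sketch (commented out) of the fig-coordinate arithmetic
+ ## --- shared by these compositions: panel row r (1 = top, 4 = bottom) gets its
+ ## --- par(fig=...) slot from a base position, a height and a separation; the
+ ## --- helper below is hypothetical, with the same toy values used here:
+ ## fig.row <- function(r, ypos = 0.08, height = 0.19, ysep = 0.015, x0 = 0, x1 = 1) {
+ ##   y0 <- ypos + (4 - r) * (height + ysep)
+ ##   c(x0, x1, y0, y0 + height)   # xmin, xmax, ymin, ymax
+ ## }
+ ## fig.row(1)   # 0.000 1.000 0.695 0.885, i.e. the top panel slot
+ ## --- end aside ---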
if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean 
time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple') && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime.title[1]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime.title[2]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime.title[3]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime.title[4]), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ mtext("hPa", cex=legend1.cex*1.3)
+ }
+
+ if(composition == "impact") {
+ # Color legend:
+ legend1.xpos <- 0.10
+ legend1.width <- 0.80
+ legend1.cex <- 4
+
+ #my.subset2 <- match(values.to.plot2, my.brks.var)
+ par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE)
+ ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var)
+ par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax)
+ #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2)
+ mtext(var.unit[var.num], cex=legend1.cex*1.3)
+
+ }
+
+ dev.off()
+
+ } # close if on composition
+
+
+ if(composition == "psl.rean" || composition == "psl.rean.unordered") dev.off()
+
+ print("Finished!")
+} # close if on composition != "summary"
+
+
+
+#} # close for on forecasts.month
+
+
+
+if(composition == "taylor"){
+ library("plotrix")
+
+ fields.name <- "ERA-Interim"
+
+ # choose a reference season, from 13 (winter) to 16 (autumn):
+ season.ref <- 13
+
+ # set the first WR classification:
+ rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+
+ # load data of the reference season of the first classification (this line should be modified to load the chosen season):
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data
+ load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+
+ cluster1.ref <- which(orden == cluster1.name)
+ cluster2.ref <- which(orden == cluster2.name)
+ cluster3.ref <- which(orden == cluster3.name)
+ cluster4.ref <- which(orden == cluster4.name)
+
+ #cluster1.ref <- cluster1.name.period[13]
+ #cluster2.ref <- cluster2.name.period[13]
+ #cluster3.ref <- cluster3.name.period[13]
+ #cluster4.ref <- cluster4.name.period[13]
+
+ cluster1.ref <- 3 # bypass the previous values, because for dataset num. 46 the Blocking and Atlantic Ridge regimes were saved swapped!
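+ ## --- Aside: plotrix is presumably loaded for its taylor.diagram() function;
+ ## --- a hedged, commented-out toy call showing how a reference centroid and a
+ ## --- centroid from a second classification would be compared, both passed as
+ ## --- plain vectors:
+ ## library(plotrix)
+ ## set.seed(3)
+ ## ref <- rnorm(500)                  # reference centroid, flattened
+ ## mod <- ref + rnorm(500, sd = 0.5)  # centroid from the second classification
+ ## taylor.diagram(ref, mod, normalize = TRUE)
+ ## --- end aside ---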
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (e.g. one with running clustering) to contrast with the first one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + rean2.name <- "ERA-Interim" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped!
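+ # (same hand fix as above for the second classification; normally the which(orden == ...) lookup just inverts the name-to-position mapping, e.g. with purely illustrative values: + # orden <- c("NAO+", "NAO-", "Blocking", "Atl.ridge"); which(orden == "Blocking") # returns 3, so the cluster named "Blocking" would be stored as map.old.ref3)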
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regime: + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat,
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, the one with the seasonal anomalies: + n.map <- n.map + 1 # n.map starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1),
pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" # "black" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod <- TRUE + #add.mod <- ifelse(p == 1, FALSE, TRUE) + + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) {
corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) #
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
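+ # (all these [p, 1+l, ] arrays share the triangle positions defined above for array.spat.cor: index 1 = left triangle (NAO+), 2 = bottom (Atl.Ridge), 3 = right (NAO-), 4 = top (Blocking); each self-transition is left as NA so that its triangle stays empty in the plots)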
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+ + array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking + array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge + + array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+ + array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO- + array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge + + array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+ + array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO- + array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking + array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO- + array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking + array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge + + array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+ + array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking + array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge + + array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+ + array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO- + array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge + + array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+ + array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO- + array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking + array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + #array.rpss[p,1+l, 1] <- # NAO+ + #array.rpss[p,1+l, 3] <- # NAO- + #array.rpss[p,1+l, 4] <- # Blocking + #array.rpss[p,1+l, 2] <- # Atl.Ridge + } + } + + + + # Spatial Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400) + + # create an array similar to array.spat.cor but with colors instead of corr.values: + sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4)) + array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7] + array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6] + array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5] + array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4] + array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3] + array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2] + array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time",
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
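+ # (in these target-month panels, rows are target months and columns show the lead time decreasing from 6 to 0; i2 = i-(6-j), wrapped with +12 when non-positive, recovers the startdate month: e.g. target month i=2 (Feb) at lead time 6-j=4 was started in month i2=2-4+12=10 (Oct))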
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.freq.obs but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0),
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + + # Bias freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequencies: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6]
+ array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5]
+ array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4]
+ array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3]
+ array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2]
+ array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2])
+ polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3])
+ polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}}
+
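+ # The par()/plot()/axis()/polygon() block above is repeated verbatim for every
+ # regime and every summary figure. A minimal sketch of how these startdate
+ # panels could be factored into one helper; `draw.startdate.panel` is a
+ # hypothetical name, and it assumes the 12 x 7 colour slice layout and the
+ # my.month.short labels already defined in this script. Defining the function
+ # here has no side effect on the open device:
+ draw.startdate.panel <- function(panel.cols, regime.name, fig.title, fig.body, cex=1.5) {
+   par(mar=c(0,0,4,0), fig=fig.title, new=TRUE)
+   mtext(regime.name, cex=cex)
+   par(mar=c(0,4,1,0), fig=fig.body, new=TRUE)
+   plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+   mtext(side = 1, text = "Startdate", line = 4, cex=cex)
+   mtext(side = 2, text = "Lead time", line = 2.5, cex=cex)
+   axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=cex)
+   axis(2, at=seq(0,6), las=2, cex.axis=cex, labels=0:6)
+   for(p in 1:12){ for(l in 0:6){   # one square cell per (startdate, lead time) pair
+     polygon(c(p-0.5, p-0.5, p+0.5, p+0.5), c(l-0.5, l+0.5, l+0.5, l-0.5),
+             col=panel.cols[p, 1+l]) }}
+ }
+ # e.g. the Atlantic Ridge panel below would reduce to:
+ # draw.startdate.panel(array.imp.colors[,,2], "Atlantic Ridge", c(0.45,0.9,0.42,0.5), c(0.45,0.9,0.12,0.42))
+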
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.6, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+
+ # Simulated Transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+ for(l in 0:6){
+ polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])
+ polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])
+ polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4])
+ }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l,
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
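+ # The index i2 used throughout these target-month loops converts a
+ # (target month i, lead time 6-j) cell back to its startdate month, wrapping
+ # around the calendar. An equivalent formulation with modular arithmetic,
+ # as a sketch (`startdate.month` is a hypothetical name; months are 1..12):
+ startdate.month <- function(target, lead) ((target - lead - 1) %% 12) + 1
+ # e.g. a 4-month lead targeting February (2) starts in October (10):
+ stopifnot(startdate.month(2, 4) == 10)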
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
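+ # Each colour array in these summaries is built with eight cascading "<"
+ # assignments, as for array.trans.obs1 above. A hedged sketch of one
+ # equivalent helper (`bin.colors` is a hypothetical name; it assumes brks is
+ # sorted ascending, the first and last break serving only as colourbar end
+ # labels, and an array input such as the 12 x 7 x 4 arrays used here):
+ bin.colors <- function(values, brks, cols, na.col="white") {
+   # findInterval() counts the interior breaks <= each value, so idx
+   # reproduces the cascade; NAs are mapped afterwards, which also avoids
+   # putting NAs into a logical subscripted assignment (R rejects those):
+   idx <- findInterval(values, brks[2:(length(brks)-1)]) + 1
+   out <- cols[idx]
+   out[is.na(values)] <- na.col
+   array(out, dim=dim(values))
+ }
+ # e.g. the block above could be written as:
+ # array.trans.obs1.colors <- bin.colors(array.trans.obs1, my.seq, trans.cols)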
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
+ # Simulated transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from NAO- regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
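+ # Geometry of the single-page summary plots (sketch): every (startdate, lead time)
+ # cell is split into four triangles meeting at the cell centre; slices 1..4 of the
+ # colour arrays are drawn left, bottom, right and top, matching the NAO+, Atlantic
+ # Ridge, NAO- and Blocking panels of the multi-panel pages. A hypothetical helper
+ # drawing one cell centred on (x0, y0) from four colours:
+ draw.trans.cell <- function(x0, y0, cols4) {
+   polygon(c(x0-0.5, x0-0.5, x0), c(y0-0.5, y0+0.5, y0), col=cols4[1]) # left triangle
+   polygon(c(x0-0.5, x0, x0+0.5), c(y0-0.5, y0, y0-0.5), col=cols4[2]) # bottom triangle
+   polygon(c(x0, x0+0.5, x0+0.5), c(y0, y0+0.5, y0-0.5), col=cols4[3]) # right triangle
+   polygon(c(x0-0.5, x0, x0+0.5), c(y0+0.5, y0, y0+0.5), col=cols4[4]) # top triangle
+ }
+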
+ # Observed transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from NAO- regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
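+ # Index note for the target-month pages (sketch): each row fixes the target month
+ # i, so the startdate shown at lead time lt is i - lt, wrapped into 1..12; that is
+ # what the inline i2 = ifelse(...) in the panel loops computes. As a standalone
+ # function (illustrative only, not called by this script):
+ startdate.for.target <- function(i, lt) ifelse(i - lt > 0, i - lt, i - lt + 12)
+ # e.g. startdate.for.target(2, 5) is 9: the September startdate reaches a February
+ # target month at lead time 5.
+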
+ # Simulated transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from blocking regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
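+ # Layout note (sketch): the multi-panel pages are composed with par(fig=...)
+ # rather than layout(); each call reserves a normalised c(x1,x2,y1,y2) region of
+ # the device, and new=TRUE overlays the next panel on the same page, e.g.:
+ # plot.new()
+ # par(fig=c(0, 0.45, 0.6, 0.9), new=TRUE); plot(1:10)   # top-left panel
+ # par(fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE); plot(10:1) # top-right panel
+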
+ # Observed transition probability from blocking to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from blocking regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from blocking regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
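+ # Estimation note (an illustrative sketch under the assumption that a monthly
+ # regime series is coded 1=NAO+, 2=Atlantic Ridge, 3=NAO-, 4=Blocking): a row of
+ # the transition matrices plotted here is just the relative frequency of each
+ # regime among the months that follow a given source regime:
+ trans.prob <- function(regimes, source) {
+   from <- which(head(regimes, -1) == source)                  # months in the source regime
+   100 * tabulate(regimes[from + 1], nbins=4) / length(from)   # % reaching each regime
+ }
+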
+ # Simulated transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2]
+ array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1]
+ array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% ECMWF-S4 transition from Atlantic ridge regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
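+ # Palette note: the eight hex values used throughout are RColorBrewer's 8-class
+ # "RdBu" diverging palette; rev() flips it so that low percentages are blue and
+ # high ones red. The vector could equivalently be built as:
+ # trans.cols <- rev(RColorBrewer::brewer.pal(8, "RdBu"))
+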
+ # Observed transition probability from Atlantic ridge to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(0,10,20,30,40,50,60,70,100)
+
+ array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2]
+ array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1]
+ array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Observed transition from Atlantic ridge regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Observed transition from Atlantic Ridge regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
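+ # The remaining pages show the bias of each transition probability on a scale
+ # symmetric around zero. A sketch of how such an array could be obtained from
+ # the two sets already plotted (assuming array.trans.diff1 is ECMWF-S4 minus
+ # observations, in percentage points):
+ # array.trans.diff1 <- array.trans.sim1 - array.trans.obs1
+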
+ # Bias of the transition probability from NAO+ to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO+ regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO+ regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO-", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
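+ # Scale note (sketch): with breaks c(-20,-10,-5,-2,0,2,5,10,20) the outermost
+ # bins absorb any bias beyond +/-20 percentage points; clipping first would make
+ # that saturation explicit if ever needed:
+ clip.bias <- function(x, lim=20) pmin(pmax(x, -lim), lim) # illustrative only, not called
+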
+ # Bias of the transition probability from NAO- to X summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of corr.values:
+ trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-20,-10,-5,-2,0,2,5,10,20)
+
+ array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4))
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2]
+ array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1]
+ array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white"
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ title("% Bias transition from NAO- regime", cex.main=1.5)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+
+ for(p in 1:12){
+   for(l in 0:6){
+     polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])
+     polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])
+     polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4])
+   }
+ }
+
+ par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ dev.off()
+
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.5)
+
+ # NAO+ :
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+ mtext("NAO+", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+ mtext("Blocking", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+ mtext("NAO-", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.5)
+ par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+ mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+ axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+ axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+ for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+ dev.off()
+
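+ # Title note (sketch): the composite pages reserve a thin full-width fig region
+ # and write the caption there with mtext(), so it is centred on the whole page
+ # rather than on the last panel drawn, e.g.:
+ # par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE); mtext("page caption")
+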
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350)
+
+ plot.new()
+ par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+ mtext("% Bias transition from NAO- regime", cex=1.2)
+
+ par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+ mtext("NAO+", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE)
+ mtext("Blocking", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}}
+
+ par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE)
+ mtext("Atlantic Ridge", cex=1.2)
+ par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE)
+ plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F)
+ mtext(side = 1, text = "Lead time", line = 2, cex=1)
+ mtext(side = 2, text = "Target month", line = 2.8, cex=1)
+ axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0))
+ axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1)
+ for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}}
+
+ par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE)
+ ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+ par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE)
+ mtext(side = 1, text = "%", line = 2, cex=1)
+
+ dev.off()
+
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq)
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600)
+
+  plot.new()
+  par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE)
+  mtext("% Bias transition from Atlantic Ridge regime", cex=1.5)
+
+  # NAO+ :
+  par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE)
+  mtext("NAO+", cex=1.5)
+  par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+  for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}}
+
+  par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE)
+  mtext("Blocking", cex=1.5)
+  par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+  for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE)
+  mtext("NAO-", cex=1.5)
+  par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+  for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}}
+
+  par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE)
+  mtext("Atlantic Ridge", cex=1.5)
+  par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE)
+  plot(1,1, type="n", xaxt="n", yaxt="n", ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+  mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+  mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+  axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+  axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+  for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}}
+
+  par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE)
+  ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq)
+
+  dev.off()
+
+  png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350)
+  plot.new()
+  par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE)
+  mtext("% Bias transition from Atlantic Ridge regime", cex=1.2)
+
+  par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE)
+  mtext("NAO+", cex=1.2)
+  par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE)
+  plot(1,1, type="n", xaxt="n",
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle
+  ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2])
+  ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3])
+  ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4])
+  ## }
+  ## }
+
+  ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE)
+  ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20))
+  ## dev.off()
+
+} # close if on composition == "summary"
+
+
+
+
+# impact map of the strongest WR:
+if(composition == "impact.highest" && fields.name == rean.name){
+
+  var.num <- 2 # choose a variable (1: sfcWind, 2: tas)
+  index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively
+
+  if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650)
+  if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650)
+  if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650)
+
+  plot.new()
+  #par(mfrow=c(4,3))
+  #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837')
+  col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99')
+
+  for(p in 13:16){ # or 1:12 for monthly maps
+    load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+    load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!!
+
+    # positions of the longitude values covering Europe only (without the Atlantic Ocean and America):
+    #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only
+    lon.max = 45
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+    cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name
+    cluster2 <- which(orden == cluster2.name.period[p])
+    cluster3 <- which(orden == cluster3.name.period[p])
+    cluster4 <- which(orden == cluster4.name.period[p])
+
+    assign(paste0("imp",cluster1), varPeriodAnom1mean)
+    assign(paste0("imp",cluster2), varPeriodAnom2mean)
+    assign(paste0("imp",cluster3), varPeriodAnom3mean)
+    assign(paste0("imp",cluster4), varPeriodAnom4mean)
+
+    # most influential WR:
+    if (index == 1) {
+      imp.all <- imp1
+      for(i in 1:dim(imp1)[1]){
+        for(j in 1:dim(imp1)[2]){
+          if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5
+          if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5
+          if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5
+          if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5
+        }
+      }
+    }
+
+    # most influential WR with positive impact:
+    if (index == 2) {
+      imp.all <- imp1
+      for(i in 1:dim(imp1)[1]){
+        for(j in 1:dim(imp1)[2]){
+          if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+          if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+          if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+          if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+        }
+      }
+
+    }
+
+    # most influential WR with negative impact:
+    if (index == 3) {
+      imp.all <- imp1
+      for(i in 1:dim(imp1)[1]){
+        for(j in 1:dim(imp1)[2]){
+          if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5
+          if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5
+          if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5
+          if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5
+        }
+      }
+    }
+
+    if(p<=3) yMod <- 0.7
+    if(p>=4 && p<=6) yMod <- 0.5
+    if(p>=7 && p<=9) yMod <- 0.3
+    if(p>=10) yMod <- 0.1
+    if(p==13 || p==14) yMod <- 0.52
+    if(p==15 || p==16) yMod <- 0.1
+
+    if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05
+    if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35
+    if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65
+    if(p==13 || p==15) xMod <- 0.05
+    if(p==14 || p==16) xMod <- 0.50
+
+    # impact map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE)
+    if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE)
+    PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F)
+
+    # title map:
+    if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE)
+    if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE)
+    mtext(my.period[p], font=2, cex=1.5)
+
+  } # close 'p' on 'period'
+
+  par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE)
+  ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden)
+
+  par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE)
+  if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5)
+  if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+  if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5)
+
+  dev.off()
+
+} # close if on composition == "impact.highest"
+
+
+
+
+
+
+
+
+
+
+
+
+
+if(monthly_anomalies) {
+  # Compute the climatology and anomalies (with a LOESS filter) for the sfcWind data and draw the monthly anomalies,
+  # to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed in the reanalysis:
+
+  load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end
+
+  sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs
+
+  sfcWind <- sfcWind366[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # in leap years, shift the March-to-December period back one day so that the 29th of February is dropped
+  }
+
+  rm(sfcWind366)
+  gc()
+
+  # LOESS anomalies:
+  sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T)
+
+  sfcWindClimLoess <- sfcWindClimDaily
+  for(i in n.pos.lat){
+    for(j in n.pos.lon){
+      my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365)
+      my.loess <- loess(ens.mean ~ day, my.data, span=0.35)
+      sfcWindClimLoess[,i,j] <- predict(my.loess)
+    }
+  }
+
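+  # A standalone sketch of the LOESS step above on a toy series (illustration only, not part of the workflow):
+  # span=0.35 means that each local regression uses about 35% of the 365 days (~128 days),
+  # wide enough to keep the seasonal cycle while filtering out the day-to-day sampling
+  # noise of the raw daily climatology.
+  if(FALSE){
+    day  <- 1:365
+    clim <- sin(2*pi*day/365) + rnorm(365, sd=0.3)                      # noisy synthetic daily climatology
+    fit  <- loess(clim ~ day, data.frame(clim=clim, day=day), span=0.35)
+    clim.smooth <- predict(fit)                                         # smoothed annual cycle
+  }
+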
rm(sfcWindClimDaily) + gc() + + sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + sfcWindAnom <- sfcWind - sfcWindClim2 + sfcWindAnomRel <- sfcWindAnom / sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above but for computing climatology and anomalies (with LOESS filter) for psl data, and draw monthly slp anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # take the march to december period removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP in Pascal to MSLP in hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + ## color scale for impact maps: + ## my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + ##year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + ## wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + sfcWindAnomPeriodRel <- sfcWindAnomRel[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriodRel, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + ## reset par to its default values, because drawing with PlotEquiMap() alters some par values: + if(year.test == year.chosen[1] && month.test == month.chosen[1]) { op <- par(no.readonly = TRUE) } + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, 
title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + #my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(fig=c(0.03, 0.36, 0.00, 0.1), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)]) + + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) + + par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE) + mtext("%", cex=1.8) + + par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE) + + PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1) + + # you could almost use the normal PlotEquiMap funcion below, it only needs to set the black line for anomaly=0 and decrease the size of the contour labels: + # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5) + + ##my.subset2 <- match(values.to.plot2, my.brks.var) + #legend2.cex <- 1.8 + + par(op) # reset par parameters + par(fig=c(0.37, 1, 0, 0.1), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(brks=my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=3, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2) + + par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE) + mtext("hPa", cex=1.8) + + #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE) + #mtext("0", cex=1.8) + + dev.off() + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + # mean wind speed and slp anomaly only during days of the chosen year: + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData")) # load cluster.sequence + + # arrange the regime sequence in an array, to separate months from years: + cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years)) + #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years)) # only for NCEP + + cluster.test <- cluster2D[,pos.year.test] + + fre1.days <- length(which(cluster.test == 1)) + fre2.days <- 
length(which(cluster.test == 2)) + fre3.days <- length(which(cluster.test == 3)) + fre4.days <- length(which(cluster.test == 4)) + + # wind anomaly for the chosen year and month and cluster: + sfcWind1 <- sfcWindAnomPeriodRel[which(cluster.test == 1),,,drop=FALSE] + sfcWind2 <- sfcWindAnomPeriodRel[which(cluster.test == 2),,,drop=FALSE] + sfcWind3 <- sfcWindAnomPeriodRel[which(cluster.test == 3),,,drop=FALSE] + sfcWind4 <- sfcWindAnomPeriodRel[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3])) + + sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE) + sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE) + sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE) + sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE) + + # load regime names: + load(paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData")) + + ## add strip with daily sequence of WRs: + + mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name)) + mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name)) + mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name)) + mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name)) + + cluster1.name.short <- substr(cluster1.name,1,1) + cluster2.name.short <- substr(cluster2.name,1,1) + cluster3.name.short <- substr(cluster3.name,1,1) + cluster4.name.short <- substr(cluster4.name,1,1) + + ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime: + if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1) + if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2) + if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3) + if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4) + + c1 <- which(cluster.test == 1) + c2 <- which(cluster.test == 2) + c3 <- which(cluster.test == 3) + c4 <- which(cluster.test == 4) + + cluster.test.letters <- cluster.test + cluster.test.letters[c1] <- cluster1.name.short + cluster.test.letters[c2] <- cluster2.name.short + cluster.test.letters[c3] <- cluster3.name.short + cluster.test.letters[c4] <- cluster4.name.short + + my.strip <- cluster.test.letters + + if(no.regimes) { + cluster.test.letters2 <- cluster.test.letters + cluster.test.letters2[which(cluster.test.letters == "N+")] <- "C1" + cluster.test.letters2[which(cluster.test.letters == "N-")] <- "C2" + cluster.test.letters2[which(cluster.test.letters == "B")] <- "C3" + cluster.test.letters2[which(cluster.test.letters == "A")] <- "C4" + my.strip <- cluster.test.letters2 + } + + cluster.col <- cluster.test.letters + cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1" + cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1" + cluster.col[which(cluster.test.letters == "B")] <- "White" + cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1" + + cluster1 <- which(orden == 
cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("fre.days",cluster1), fre1.days) + assign(paste0("fre.days",cluster2), fre2.days) + assign(paste0("fre.days",cluster3), fre3.days) + assign(paste0("fre.days",cluster4), fre4.days) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + assign(paste0("imp.test",cluster1), sfcWindMean1) + assign(paste0("imp.test",cluster2), sfcWindMean2) + assign(paste0("imp.test",cluster3), sfcWindMean3) + assign(paste0("imp.test",cluster4), sfcWindMean4) + + # psl anomaly for the chosen year and month and cluster: + slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE] + slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE] + slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE] + slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE] + + # in case a regime has NO days in that month, we must set it to NA: + if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3])) + + slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE) + slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE) + slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE) + slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE) + + assign(paste0("psl.test",cluster1), slpMean1) + assign(paste0("psl.test",cluster2), slpMean2) + assign(paste0("psl.test",cluster3), slpMean3) + assign(paste0("psl.test",cluster4), slpMean4) + + # save strip with the daily regime series for chosen month and year: + fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png") + png(filename=fileoutput.seq,width=1500,height=1850) + + plot.new() + + sep <- 0.03 + for(day in 1: n.days.in.a.period(p, 2001)){ + sep.cum <- (day-1)*sep + polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day]) + text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5) + text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=my.strip[day], cex=2) + + } + + dev.off() + + # save average impact and sea level pressure only for chosen month and year: + fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png") + + png(filename=fileoutput.test,width=1500,height=2000) + + plot.new() + + par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE) + PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE) + PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE) + PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE) + PlotEquiMap2(imp.test4[,EU], lon[EU], 
lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden} + + par(fig=c(0,0.33,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)]), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=2, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("%", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " SLP anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=2, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) 
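+      # Layout note: all the compositions in this script are assembled by repeated calls to
+      # par(fig=c(x1,x2,y1,y2), new=TRUE), where the four values are fractions of the device,
+      # so that each map, title and colorbar is drawn inside its own viewport. A minimal
+      # two-panel sketch of the same technique (illustration only; hypothetical file name):
+      if(FALSE){
+        png("layout_demo.png", width=600, height=300)
+        plot.new()
+        par(fig=c(0, 0.5, 0, 1), new=TRUE); plot(1:10)   # left half of the device
+        par(fig=c(0.5, 1, 0, 1), new=TRUE); plot(10:1)   # right half of the device
+        dev.off()
+      }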
+
+      n.days <- floor(n.days.in.a.period(month.test,1))
+
+      par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE)
+      mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8)
+      par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE)
+      mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8)
+      par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE)
+      mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8)
+      par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE)
+      mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8)
+
+      dev.off()
+
+      ## add the strip with the regime sequence over the average impact composition:
+      fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png")
+      fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png")
+
+      system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp)) # cut the lower part of the strip
+      system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both))
+
+      ## same image formatted for the catalogue:
+      fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test))
+
+      system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput))
+
+
+    } # close for on month.test
+  } # close for on year.test
+
+
+} # close if on monthly_anomalies
+
+
+
+
+
+
+
+
+
diff --git a/weather_regimes_maps_v29.R~ b/weather_regimes_maps_v29.R~
new file mode 100644
index 0000000000000000000000000000000000000000..c598bd156cd61c7052ac5c393774ee15590396aa
--- /dev/null
+++ b/weather_regimes_maps_v29.R~
@@ -0,0 +1,5823 @@
+
+# Creation: 6/2016
+# Author: Nicola Cortesi
+#
+# Aim: to visualize the regime anomalies of the weather regimes, their interannual frequencies and their impact on a chosen climate variable (i.e., wind speed or temperature)
+#
+# I/O: the input files needed are:
+# 1) the output of the weather_regimes.R script, which ends with the suffix "_psl.RData".
+# 2) if you need the impact map too, the outputs of the weather_regimes_impact.R script, which end with the suffixes "_sfcWind.RData" and "_tas.RData"
+#
+# Output files are the maps, which the user can generate as .png or as .pdf.
+# Due to the script's length, it is better to run it from the terminal with: Rscript ~/scripts/weather_regimes_maps.R
+# Note that this script doesn't need to be parallelized, because it takes only a few seconds to create and save the output maps.
+#
+# Assumptions: you need to have created beforehand the '_psl.RData' files, which are the output of 'weather_regimes.R', for each period you want to visualize.
+# If your regimes derive from a reanalysis, this script must be run twice:
+# first, only one month/season at a time with: 'period=X' (X = 1 .. 12), 'ordering = TRUE', 'save.names = TRUE', 'as.pdf = FALSE' and 'composition = "none"'
+# You already have to know which is the right regime order by taking a look at the output maps (_clusterX.png) of weather_regimes.R
+# After that, any time you run this script, remember to set:
+# 'ordering = TRUE', 'save.names = FALSE' (and any type of composition you want to plot), so as not to overwrite the files which store the right cluster order.
+# You can check if the monthly regimes have been associated correctly by setting composition <- "psl.rean"
+#
+# For example, you can find that the sequence of the images in the file (from top to bottom) corresponds to:
+# Blocking / NAO- / Atl.Ridge / NAO+ regime. Subsequently, you have to change 'ordering'
+# from F to T (see below) and set the four variables 'clusterX.name' (X = 1...4) to follow the same order found inside that file.
+# Then, you have to run this script again to get the maps in the right order, which by default is, from top to bottom:
+# "NAO+","NAO-","Blocking" and "Atl.Ridge" (you can change it with the variable 'orden'). You also have to set 'save.names' to TRUE, so that an .RData file
+# is written to store the order of the four regimes, in case you need to visualize these maps again in the future or create new ones based on the saved data.
+#
+# If your regimes derive from a forecast system, you first have to compute the regimes derived from a reanalysis. Then, you can use the reanalysis
+# to automatically associate to each simulated cluster one of the
+# observed regimes, the one with the highest spatial correlation. Beware that the two maps of regime anomalies must have the same spatial resolution!
+#
+# Branch: weather_regimes
+
+rm(list=ls())
+library(s2dverification) # for the function Load()
+library(Kendall) # for the MannKendall test
+#library("corrplot")
+
+source('/home/Earth/ncortesi/scripts/Rfunctions.R') # for the calendar functions
+
+### set the directory where the weather regimes computed with a reanalysis are stored (xxxxx_psl.RData files):
+
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/18_as_12_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/43_as_41_but_with_running_cluster_and_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/45_as_43_but_with_Z500"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/50_NCEP_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/52_JRA55_monthly_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/54_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/55_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days"
+rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/56_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_ordered4variance"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/57_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_ordered4variance"
+#rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/58_JRA55_monthly_1981-2016_LOESS_filter_lat_corr_running_15days_ordered4variance"
+
+rean.name <- "JRA-55" #"NCEP" #"ERA-Interim" # reanalysis name (if the input data comes from a reanalysis)
+
+forecast.name <- "ECMWF-S4" # forecast system name (if the input data comes from a forecast system)
+
+fields.name <- rean.name #forecast.name # choose between loading reanalysis ('rean.name') or forecasts ('forecast.name')
+
+composition <- "edpr" # choose which kind of composition you want to plot:
+          # 'none' doesn't plot anything; it only associates the clusters to the regimes with the manual association in the rows below
+          #        and saves them in the ClusterName.RData files for each period selected, overwriting any pre-existing files.
+          # 'variance': as 'none', but before saving ClusterName.RData, it reorders the clusters in decreasing order of explained variance,
+          #        so that the orden[1] regime corresponds to the cluster with the highest variance, orden[2] to the second, and so on.
+          #        In this way, you can show all the compositions below in order of variance instead of ordering them manually one period at a time.
+          # 'simple' for monthly graphs of regime anomalies / impact / interannual frequencies in three columns
+          # 'psl' for all the regime anomalies for a fixed forecast month
+          # 'fre' for all the interannual frequencies for a fixed forecast month
+          # 'impact' for all the impact maps for a fixed forecast month and the variable specified in var.num
+          # 'summary' for the summary plots of all forecast months (persistence bias, freq. bias, correlations, etc.) [you need all the ClusterNames files]
+          # 'impact.highest' for the impact plots of the regime with the highest impact
+          # 'single.impact' to save the four impact maps in a 2x2 composition
+          # 'single.psl' to save the individual psl map
+          # 'single.fre' to save the individual fre map
+          # 'taylor' to plot the Taylor diagram between the regime anomalies of a monthly reanalysis and a seasonal one
+          # 'edpr' as 'simple', but swapping the position of the regime anomaly maps with that of the impact maps
+          # 'psl.rean': plot all the regime anomalies and the correlation matrix of all months of a reanalysis with the DJF regime anomalies of the same reanalysis
+          # 'psl.rean.unordered': as before, but without ordering the regimes with the same order as in vector 'orden'
+
+var.num <- 1 # if fields.name = rean.name and composition = 'simple' or 'edpr', choose a variable for the impact maps; 1: sfcWind, 2: tas
+
+# Choose one or more periods to plot, from 1 to 17 (1-12: Jan - Dec, 13-16: Winter, Spring, Summer, Autumn, 17: Year).
+period <- 1:12
+
+# Manually associate the four clusters to the four regimes, one period at a time (only in case composition="none"):
+cluster4.name <- "NAO+"
+cluster1.name <- "NAO-"
+cluster2.name <- "Blocking"
+cluster3.name <- "Atl.Ridge"
+
+# choose an order to give to the cartography, from top to bottom:
+orden <- c("NAO+","NAO-","Blocking","Atl.Ridge")
+
+no.regimes <- TRUE # if TRUE, instead of putting the regime names in the figure titles, insert "Cluster1", "Cluster2", "Cluster3" and "Cluster4"
+                   # (when composition='edpr' or monthly_anomalies = TRUE)
+
+####### if monthly_anomalies <- TRUE, you have to specify these additional parameters:
+monthly_anomalies <- FALSE # if TRUE, also save maps with monthly wind speed and slp anomalies over Europe for the chosen years and months set below
+year.chosen <- 2016
+month.chosen <- 1:12
+NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+rean.data <- JRA55 # choose one of the above reanalyses
+
+####### if fields.name='forecast.name', you have to specify these additional variables:
+
+# working dir with the input files with the '_psl.RData' suffix:
+work.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/42_ECMWF_S4_monthly_1981-2015_LOESS_filter"
+
+lead.months <- 0 #0:6 # in case you selected 'fields.name = forecast.name', specify a lead time (0 = same month as the start month) to plot
+                      # (and the variable 'period' becomes the start month)
+forecast.month <- 1 # in case composition = 'psl', 'fre' or 'impact', choose a target month to forecast, from 1 to 12, to plot its composition
+                    # if you want to plot all compositions from 1 to 12 at once, just enable the line below and add a { at the end of the script
+                    # DO NOT run the loop below when composition="summary" or "taylor", because it will modify the data in the ClusterName.RData files!!!
+#for(forecast.month in 1:12){
+
+####### Derived variables ###############################################################################################################################################
+
+ordering <- TRUE # if TRUE, order the clusters following the regimes listed in the 'orden' vector (set it to TRUE only after you have manually inserted the names below).
+save.names <- FALSE # if TRUE, also save the cluster names (i.e., the association between clusters and weather regimes); if FALSE, the cluster names are loaded from a file
+as.pdf <- FALSE # FALSE: save results as one .png file for each season/month; TRUE: save only one .pdf file with all seasons/months
+mean.freq <- TRUE # if TRUE, when composition is 'simple' or 'edpr', it also adds the mean frequency value over each frequency plot
+
+if(fields.name != rean.name && fields.name != forecast.name) stop("variable 'fields.name' is not properly set")
+if(no.regimes) { regime.title <- paste0("Cluster",1:4) } else { regime.title <- orden }
+
+var.name <- c("sfcWind","tas")
+var.name.full <- c("wind speed","temperature") # full name of the 'predictand' variable to put in the title of the graphs
+var.unit <- c("m/s", "ºC") # unit of measure of var (for drawing color scales)
+
+var.num.orig <- var.num # when loading _psl files, this value can change!
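+# The three reanalysis paths above are Load() path templates: the $STORE_FREQ$, $VAR_NAME$,
+# $YEAR$ and $MONTH$ tokens are substituted by Load() itself when reading the data, as with
+# other s2dverification path templates. A sketch of the expansion for a hypothetical request
+# (var = "psl", daily data, January 1981):
+if(FALSE){
+  tokens <- c("$STORE_FREQ$" = "daily", "$VAR_NAME$" = "psl", "$YEAR$" = "1981", "$MONTH$" = "01")
+  path <- JRA55
+  for(k in seq_along(tokens)) path <- gsub(names(tokens)[k], tokens[k], path, fixed=TRUE)
+  print(path) # "/esnas/recon/jma/jra55/daily_mean/psl_f6h/psl_198101.nc"
+}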
+fields.name.orig <- fields.name +period.orig <- period +work.dir.orig <- work.dir +pdf.suffix <- ifelse(length(period)==1, my.period[period], "all_periods") + +n.map <- 0 + +############################################################################################ +## Start analysis ## +############################################################################################ + +if(composition != "summary" && composition != "impact.highest" && composition != "taylor"){ + + if(composition == "psl" || composition == "fre" || composition == "impact") { + if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",pdf.suffix,"_forecast_month_",forecast.month,"_",composition,".pdf"),width=110,height=60) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_forecast_month_",forecast.month,"_",composition,".png"),width=6000,height=2300) + + lead.months <- c(6:0,0) + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(composition == "psl") mtext(paste0(fields.name, " simulated Weather Regimes for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "fre") mtext(paste0(fields.name, " simulated interannual frequencies for ", month.name[forecast.month]), cex=5.5, font=2) + if(composition == "impact") mtext(paste0(fields.name, " simulated ",var.name.full[var.num], " impact for ", month.name[forecast.month]), cex=5.5, font=2) + + } # close if on composition == "psl" ... + + if(composition == "none") { + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + } + + if(composition == "variance"){ + ordering <- TRUE + save.names <- TRUE + as.pdf <- FALSE + period <- 1:12 + } + + if(composition == "psl.rean") { + ordering <- TRUE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months.png"),width=6000,height=2000) + plot.new() + } + + if(composition == "psl.rean.unordered") { + ordering <- FALSE + period <- c(9:12, 1:8) # to start from September instead of January + png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_unordered.png"),width=6000,height=2000) + plot.new() + } + + ## if(composition == "corr.matrix") { + ## ordering <- FALSE # set it to TRUE if you want to see the correlation matrix of the ordered clusters instead!!! + ## period <- c(9:12, 1:8) # to start from September instead of January + ## png(filename=paste0(rean.dir,"/",fields.name,"_reanalysis_all_months_corr_matrix.png"),width=6000,height=2000) + ## plot.new() + ## } + + if(fields.name == rean.name) { lead.month <- 1; lead.months <- 1 } # just to be able to enter the loop below once! 
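+# Reminder of the start-month / lead-month arithmetic used below: for a fixed target month,
+# the start month is forecast.month - lead.month, wrapping around the year when it becomes
+# non-positive. A quick check of the wrap for a hypothetical January target:
+if(FALSE){
+  target <- 1
+  sapply(6:0, function(lead){ m <- target - lead; if(m < 1) m + 12 else m })
+  # 7 8 9 10 11 12 1 -> start dates from July to January all target January
+}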
+
+for(lead.month in lead.months){
+  #lead.month=lead.months[1] # for the debug
+
+  if(composition == "simple" || composition == 'edpr'){
+    if(as.pdf && fields.name == rean.name) pdf(file=paste0(rean.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,".pdf"),width=40, height=60)
+    if(as.pdf && fields.name == forecast.name) pdf(file=paste0(work.dir,"/",fields.name,"_",var.name[var.num],"_",pdf.suffix,"_leadmonth_",lead.month,".pdf"),width=40,height=60)
+  }
+
+  if(composition == 'psl' || composition == 'fre' || composition == 'impact') {
+    period <- forecast.month - lead.month
+    if(period < 1) period <- period + 12
+  }
+
+  for(p in period){
+    #p <- period[1] # for the debug
+    p.orig <- p
+
+    if(fields.name == rean.name) print(paste("p =",p))
+    if(fields.name == forecast.name) print(paste("p =",p,"lead.month =", lead.month))
+
+    # load regime data (for forecasts, it loads only the data corresponding to the chosen start month p and lead month 'lead.month')
+    impact.data <- FALSE
+    if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)) {
+      load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData"))
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+
+      # load impact data only if it is available; if not, it will leave an empty space in the composition:
+      if(file.exists(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))){
+        impact.data <- TRUE
+        print(paste0("Impact data for variable ",var.name[var.num]," available for reanalysis ", rean.name))
+        load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData"))
+      } else {
+        impact.data <- FALSE
+        print(paste0("Impact data for variable ",var.name[var.num]," not available for reanalysis ", rean.name))
+      }
+
+      if(composition == "variance"){
+        my.cluster2 <- my.cluster # create a copy of my.cluster
+
+        ss1 <- which(my.cluster$cluster == 1)
+        ss2 <- which(my.cluster$cluster == 2)
+        ss3 <- which(my.cluster$cluster == 3)
+        ss4 <- which(my.cluster$cluster == 4)
+
+        withinss <- my.cluster$withinss
+        max1 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # first cluster with maximum variance
+        withinss[max1] <- NA
+
+        max2 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # second cluster with maximum variance
+        withinss[max2] <- NA
+
+        max3 <- which(my.cluster$withinss == max(withinss, na.rm=TRUE)) # third cluster with maximum variance
+        withinss[max3] <- NA
+
+        max4 <- which(!is.na(withinss))
+        rm(withinss)
+
+        # vector whose first element is the cluster with the maximum variance, whose second element is the cluster with the second maximum variance, and so on:
+        max.seq <- c(max1, max2, max3, max4)
+
+        assign(paste0("cluster",max1,".name"), orden[1]) # associate the cluster with the highest explained variance to the first regime to plot (usually NAO+)
+        assign(paste0("cluster",max2,".name"), orden[2])
+        assign(paste0("cluster",max3,".name"), orden[3])
+        assign(paste0("cluster",max4,".name"), orden[4])
+      }
+    }
+
+    if(fields.name == forecast.name && n.map < 7){
+      load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # load cluster time series and mean psl data
+
+      if(file.exists(paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData"))){
+        impact.data <- TRUE
+        print("Impact data available for forecasts")
+        if((composition == "simple" || composition == "impact")) load(file=paste0(work.dir,"/",forecast.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # load mean var data for impact maps
+      } else {
+        impact.data <- FALSE
+        print("Impact data for forecasts not available")
+      }
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    }
+
+    if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+    if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+    if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+    if(p != p.orig) p <- p.orig
+
+    if(fields.name == forecast.name && n.map < 7){
+
+      # compute the simulated interannual monthly variability:
+      n.days.month <- dim(my.cluster.array[[p]])[1] # number of days in the forecast month
+
+      freq.sim1 <- apply(my.cluster.array[[p]] == 1, c(2,3), sum)
+      freq.sim2 <- apply(my.cluster.array[[p]] == 2, c(2,3), sum)
+      freq.sim3 <- apply(my.cluster.array[[p]] == 3, c(2,3), sum)
+      freq.sim4 <- apply(my.cluster.array[[p]] == 4, c(2,3), sum)
+
+      sd.freq.sim1 <- sd(freq.sim1) / n.days.month
+      sd.freq.sim2 <- sd(freq.sim2) / n.days.month
+      sd.freq.sim3 <- sd(freq.sim3) / n.days.month
+      sd.freq.sim4 <- sd(freq.sim4) / n.days.month
+
+      # compute the transition probabilities:
+      j1 <- j2 <- j3 <- j4 <- 1
+      transition1 <- transition2 <- transition3 <- transition4 <- c()
+
+      n.members <- dim(my.cluster.array[[p]])[3]
+
+      for(m in 1:n.members){
+        for(y in 1:n.years){
+          for (d in 1:(n.days.month-1)){
+
+            if (my.cluster.array[[p]][d,y,m] == 1 && (my.cluster.array[[p]][d + 1,y,m] != 1)){
+              transition1[j1] <- my.cluster.array[[p]][d + 1,y,m]
+              j1 <- j1 + 1
+            }
+            if (my.cluster.array[[p]][d,y,m] == 2 && (my.cluster.array[[p]][d + 1,y,m] != 2)){
+              transition2[j2] <- my.cluster.array[[p]][d + 1,y,m]
+              j2 <- j2 + 1
+            }
+            if (my.cluster.array[[p]][d,y,m] == 3 && (my.cluster.array[[p]][d + 1,y,m] != 3)){
+              transition3[j3] <- my.cluster.array[[p]][d + 1,y,m]
+              j3 <- j3 + 1
+            }
+            if (my.cluster.array[[p]][d,y,m] == 4 && (my.cluster.array[[p]][d + 1,y,m] != 4)){
+              transition4[j4] <- my.cluster.array[[p]][d + 1,y,m]
+              j4 <- j4 + 1
+            }
+          }
+        }
+      }
+
+      trans.sim <- matrix(NA,4,4, dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"),c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim")))
+      trans.sim[1,2] <- length(which(transition1 == 2))
+      trans.sim[1,3] <- length(which(transition1 == 3))
+      trans.sim[1,4] <- length(which(transition1 == 4))
+
+      trans.sim[2,1] <- length(which(transition2 == 1))
+      trans.sim[2,3] <- length(which(transition2 == 3))
+      trans.sim[2,4] <- length(which(transition2 == 4))
+
+      trans.sim[3,1] <- length(which(transition3 == 1))
+      trans.sim[3,2] <- length(which(transition3 == 2))
+      trans.sim[3,4] <- length(which(transition3 == 4))
+
+      trans.sim[4,1] <- length(which(transition4 == 1))
+      trans.sim[4,2] <- length(which(transition4 == 2))
+      trans.sim[4,3] <- length(which(transition4 == 3))
+
+      trans.tot <- apply(trans.sim,1,sum,na.rm=T)
+      for(i in 1:4) trans.sim[i,] <- trans.sim[i,]/trans.tot[i]
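+      # A compact cross-check of the transition matrix above (an illustrative sketch,
+      # not used by the script): for one uninterrupted series of daily cluster labels,
+      # tabulating consecutive pairs and zeroing the diagonal gives the same
+      # row-normalised switch probabilities; unlike the loops above, it does not
+      # restart the count at member/year boundaries.
+      #   trans.prob <- function(cl) {
+      #     tr <- table(factor(head(cl,-1), levels=1:4), factor(tail(cl,-1), levels=1:4))
+      #     diag(tr) <- 0       # ignore days that stay in the same regime
+      #     prop.table(tr, 1)   # each row: probability of switching to the other regimes
+      #   }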
+
+      # compute cluster persistence:
+      my.cluster.vector <- as.vector(my.cluster.array[[p]]) # reduce it to a vector with the order: day1month1member1, day2month1member1, ..., day1month2member1, day2month2member1, ...
+
+      sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000)
+      i1 <- i2 <- i3 <- i4 <- 1
+
+      if(my.cluster.vector[1] != 1) i1 <- 0
+      if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1
+      if(my.cluster.vector[1] != 2) i2 <- 0
+      if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1
+      if(my.cluster.vector[1] != 3) i3 <- 0
+      if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1
+      if(my.cluster.vector[1] != 4) i4 <- 0
+      if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1
+      n.dd <- length(my.cluster.vector)
+
+      for(d in 1:(n.dd-1)){
+        if(my.cluster.vector[d] != 1 && my.cluster.vector[d+1] == 1) i1 <- i1+1
+        if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+        if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+        if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+        if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+        if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+        if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+        if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+      }
+
+      if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+      if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+      if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+      if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+      pers1 <- mean(sequ1[which(sequ1 != 0)])
+      pers2 <- mean(sequ2[which(sequ2 != 0)])
+      pers3 <- mean(sequ3[which(sequ3 != 0)])
+      pers4 <- mean(sequ4[which(sequ4 != 0)])
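+      # The mean spell lengths above can be cross-checked with base R's rle(), which
+      # returns the lengths of consecutive runs (an illustrative sketch; like the loop
+      # above, it treats runs crossing month/member boundaries as single spells):
+      #   mean.persistence <- function(cl, k) { r <- rle(cl); mean(r$lengths[r$values == k]) }
+      #   mean.persistence(c(1,1,2,1,1,1,4), 1)   # runs of length 2 and 3 -> 2.5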
+
+      # compute correlations between simulated clusters and observed regime anomalies:
+      cluster1.sim <- pslwr1mean; cluster2.sim <- pslwr2mean; cluster3.sim <- pslwr3mean; cluster4.sim <- pslwr4mean
+
+      if(composition == 'impact' && impact.data == TRUE) {
+        cluster1.sim.imp <- varwr1mean; cluster2.sim.imp <- varwr2mean; cluster3.sim.imp <- varwr3mean; cluster4.sim.imp <- varwr4mean
+        #cluster1.sim.imp.pv <- pvalue1; cluster2.sim.imp.pv <- pvalue2; cluster3.sim.imp.pv <- pvalue3; cluster4.sim.imp.pv <- pvalue4
+      }
+
+      lat.forecast <- lat; lon.forecast <- lon
+
+      if((p + lead.month) > 12) { load.month <- p + lead.month - 12 } else { load.month <- p + lead.month } # reanalysis forecast month to load
+      load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_","psl",".RData")) # load reanalysis data, which overwrites the pslwrXmean variables
+      load(paste0(rean.dir,"/",rean.name,"_", my.period[load.month],"_","ClusterNames",".RData")) # load also the reanalysis regime names
+
+      # load reanalysis varwrXmean impact data:
+      if(composition == 'impact' && impact.data == TRUE) load(file=paste0(rean.dir,"/",rean.name,"_",my.period[load.month],"_",var.name[var.num],".RData"))
+
+      if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script
+      if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+      if(!identical(period.orig,period)) period <- period.orig
+      if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+      if(p.orig != p) p <- p.orig
+
+      # compute the observed transition probabilities:
+      n.days.month <- n.days.in.a.period(load.month,2001)
+      j1o <- j2o <- j3o <- j4o <- 1; transition.obs1 <- transition.obs2 <- transition.obs3 <- transition.obs4 <- c()
+
+      for (d in 1:(length(my.cluster$cluster)-1)){
+        if (my.cluster$cluster[d] == 1 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs1[j1o] <- my.cluster$cluster[d + 1]
+          j1o <- j1o + 1
+        }
+        if (my.cluster$cluster[d] == 2 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs2[j2o] <- my.cluster$cluster[d + 1]
+          j2o <- j2o + 1
+        }
+        if (my.cluster$cluster[d] == 3 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs3[j3o] <- my.cluster$cluster[d + 1]
+          j3o <- j3o + 1
+        }
+        if (my.cluster$cluster[d] == 4 && (my.cluster$cluster[d + 1] != my.cluster$cluster[d])){
+          transition.obs4[j4o] <- my.cluster$cluster[d + 1]
+          j4o <- j4o + 1
+        }
+      }
+
+      trans.obs <- matrix(NA,4,4, dimnames=list(c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs"),c("cluster1.obs","cluster2.obs","cluster3.obs","cluster4.obs")))
+      trans.obs[1,2] <- length(which(transition.obs1 == 2))
+      trans.obs[1,3] <- length(which(transition.obs1 == 3))
+      trans.obs[1,4] <- length(which(transition.obs1 == 4))
+
+      trans.obs[2,1] <- length(which(transition.obs2 == 1))
+      trans.obs[2,3] <- length(which(transition.obs2 == 3))
+      trans.obs[2,4] <- length(which(transition.obs2 == 4))
+
+      trans.obs[3,1] <- length(which(transition.obs3 == 1))
+      trans.obs[3,2] <- length(which(transition.obs3 == 2))
+      trans.obs[3,4] <- length(which(transition.obs3 == 4))
+
+      trans.obs[4,1] <- length(which(transition.obs4 == 1))
+      trans.obs[4,2] <- length(which(transition.obs4 == 2))
+      trans.obs[4,3] <- length(which(transition.obs4 == 3))
+
+      trans.tot.obs <- apply(trans.obs,1,sum,na.rm=T)
+      for(i in 1:4) trans.obs[i,] <- trans.obs[i,]/trans.tot.obs[i]
+
+      # compute the observed interannual monthly variability:
+      freq.obs1 <- freq.obs2 <- freq.obs3 <- freq.obs4 <- c()
+
+      for(y in year.start:year.end){
+        # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+        freq.obs1[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 1))
+        freq.obs2[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 2))
+        freq.obs3[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 3))
+        freq.obs4[y-year.start+1] <- length(which(my.cluster$cluster[(y-year.start)*n.days.month + (1:n.days.month)] == 4))
+      }
+
+      sd.freq.obs1 <- sd(freq.obs1) / n.days.month
+      sd.freq.obs2 <- sd(freq.obs2) / n.days.month
+      sd.freq.obs3 <- sd(freq.obs3) / n.days.month
+      sd.freq.obs4 <- sd(freq.obs4) / n.days.month
+
+      wr1y.obs <- wr1y; wr2y.obs <- wr2y; wr3y.obs <- wr3y; wr4y.obs <- wr4y # observed frequencies of the regimes
+
+      regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name)
+
+      if(length(unique(regimes.obs)) < 4) stop("Two or more names of the weather types in the reanalysis input file are repeated!")
+
+      if(identical(rev(lat),lat.forecast)) {lat.forecast <- rev(lat.forecast); print("Warning: forecast latitudes are in the opposite order to the reanalysis latitudes")}
+      if(!identical(lat, lat.forecast)) stop("forecast latitudes are different from reanalysis latitudes!")
+      if(!identical(lon, lon.forecast)) stop("forecast longitudes are different from reanalysis longitudes!")
+
+      cluster.corr <- array(NA, c(4,4), dimnames=list(c("cluster1.sim","cluster2.sim","cluster3.sim","cluster4.sim"), regimes.obs))
+      cluster.corr[1,1] <- 
cor(as.vector(cluster1.sim), as.vector(pslwr1mean)) + cluster.corr[1,2] <- cor(as.vector(cluster1.sim), as.vector(pslwr2mean)) + cluster.corr[1,3] <- cor(as.vector(cluster1.sim), as.vector(pslwr3mean)) + cluster.corr[1,4] <- cor(as.vector(cluster1.sim), as.vector(pslwr4mean)) + cluster.corr[2,1] <- cor(as.vector(cluster2.sim), as.vector(pslwr1mean)) + cluster.corr[2,2] <- cor(as.vector(cluster2.sim), as.vector(pslwr2mean)) + cluster.corr[2,3] <- cor(as.vector(cluster2.sim), as.vector(pslwr3mean)) + cluster.corr[2,4] <- cor(as.vector(cluster2.sim), as.vector(pslwr4mean)) + cluster.corr[3,1] <- cor(as.vector(cluster3.sim), as.vector(pslwr1mean)) + cluster.corr[3,2] <- cor(as.vector(cluster3.sim), as.vector(pslwr2mean)) + cluster.corr[3,3] <- cor(as.vector(cluster3.sim), as.vector(pslwr3mean)) + cluster.corr[3,4] <- cor(as.vector(cluster3.sim), as.vector(pslwr4mean)) + cluster.corr[4,1] <- cor(as.vector(cluster4.sim), as.vector(pslwr1mean)) + cluster.corr[4,2] <- cor(as.vector(cluster4.sim), as.vector(pslwr2mean)) + cluster.corr[4,3] <- cor(as.vector(cluster4.sim), as.vector(pslwr3mean)) + cluster.corr[4,4] <- cor(as.vector(cluster4.sim), as.vector(pslwr4mean)) + + # associate each simulated cluster to one observed regime: + max1 <- which(cluster.corr == max(cluster.corr), arr.ind=T) + cluster.corr2 <- cluster.corr + cluster.corr2[max1[1],] <- -999 # set this row to negative values so it won't be found as a maximum anymore + cluster.corr2[,max1[2]] <- -999 # set this column to negative values so it won't be found as a maximum anymore + max2 <- which(cluster.corr2 == max(cluster.corr2), arr.ind=T) + cluster.corr3 <- cluster.corr2 + cluster.corr3[max2[1],] <- -999 + cluster.corr3[,max2[2]] <- -999 + max3 <- which(cluster.corr3 == max(cluster.corr3), arr.ind=T) + cluster.corr4 <- cluster.corr3 + cluster.corr4[max3[1],] <- -999 + cluster.corr4[,max3[2]] <- -999 + max4 <- which(cluster.corr4 == max(cluster.corr4), arr.ind=T) + + seq.max1 <- c(max1[1],max2[1],max3[1],max4[1]) + seq.max2 <- c(max1[2],max2[2],max3[2],max4[2]) + + cluster1.regime <- seq.max2[which(seq.max1 == 1)] + cluster2.regime <- seq.max2[which(seq.max1 == 2)] + cluster3.regime <- seq.max2[which(seq.max1 == 3)] + cluster4.regime <- seq.max2[which(seq.max1 == 4)] + + cluster1.name <- regimes.obs[cluster1.regime] + cluster2.name <- regimes.obs[cluster2.regime] + cluster3.name <- regimes.obs[cluster3.regime] + cluster4.name <- regimes.obs[cluster4.regime] + + regimes.sim <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) + cluster.corr2 <- array(cluster.corr, c(4,4), dimnames=list(regimes.sim, regimes.obs)) + + spat.cor1 <- cluster.corr[1,seq.max2[which(seq.max1 == 1)]] + spat.cor2 <- cluster.corr[2,seq.max2[which(seq.max1 == 2)]] + spat.cor3 <- cluster.corr[3,seq.max2[which(seq.max1 == 3)]] + spat.cor4 <- cluster.corr[4,seq.max2[which(seq.max1 == 4)]] + + # measure persistence for the reanalysis dataset: + my.cluster.vector <- my.cluster[[1]] + + sequ1 <- sequ2 <- sequ3 <- sequ4 <- rep(0,10000) + i1 <- i2 <- i3 <- i4 <- 1 + + if(my.cluster.vector[1] != 1) i1 <- 0 + if(my.cluster.vector[1] == 1) sequ1[i1] <- sequ1[i1]+1 + if(my.cluster.vector[1] != 2) i2 <- 0 + if(my.cluster.vector[1] == 2) sequ2[i2] <- sequ2[i2]+1 + if(my.cluster.vector[1] != 3) i3 <- 0 + if(my.cluster.vector[1] == 3) sequ3[i3] <- sequ3[i3]+1 + if(my.cluster.vector[1] != 4) i4 <- 0 + if(my.cluster.vector[1] == 4) sequ4[i4] <- sequ4[i4]+1 + + n.dd <- length(my.cluster.vector) + for(d in 1:(n.dd-1)){ + if(my.cluster.vector[d] != 1 && 
my.cluster.vector[d+1] == 1) i1 <- i1+1
+        if(my.cluster.vector[d] == 1) sequ1[i1] <- sequ1[i1]+1
+
+        if(my.cluster.vector[d] != 2 && my.cluster.vector[d+1] == 2) i2 <- i2+1
+        if(my.cluster.vector[d] == 2) sequ2[i2] <- sequ2[i2]+1
+
+        if(my.cluster.vector[d] != 3 && my.cluster.vector[d+1] == 3) i3 <- i3+1
+        if(my.cluster.vector[d] == 3) sequ3[i3] <- sequ3[i3]+1
+
+        if(my.cluster.vector[d] != 4 && my.cluster.vector[d+1] == 4) i4 <- i4+1
+        if(my.cluster.vector[d] == 4) sequ4[i4] <- sequ4[i4]+1
+      }
+
+      if(my.cluster.vector[n.dd] == 1) sequ1[i1] <- sequ1[i1]+1
+      if(my.cluster.vector[n.dd] == 2) sequ2[i2] <- sequ2[i2]+1
+      if(my.cluster.vector[n.dd] == 3) sequ3[i3] <- sequ3[i3]+1
+      if(my.cluster.vector[n.dd] == 4) sequ4[i4] <- sequ4[i4]+1
+
+      persObs1 <- mean(sequ1[which(sequ1 != 0)])
+      persObs2 <- mean(sequ2[which(sequ2 != 0)])
+      persObs3 <- mean(sequ3[which(sequ3 != 0)])
+      persObs4 <- mean(sequ4[which(sequ4 != 0)])
+
+      ### insert here all the manual corrections to the regime assignment due to misassignment by the automatic selection:
+      ### forecast month: September
+      #if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+"}
+      #if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge"}
+      #if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"}
+      #if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+
+      if(p == 9 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+      if(p == 8 && lead.month == 1) { cluster1.name <- "NAO+"; cluster2.name <- "NAO-"; cluster3.name <- "Blocking"; cluster4.name <- "Atl.Ridge" }
+      if(p == 7 && lead.month == 2) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "NAO+" }
+      if(p == 6 && lead.month == 3) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO-"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking" }
+      if(p == 5 && lead.month == 4) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "NAO+"; cluster3.name <- "NAO-"; cluster4.name <- "Blocking" }
+      if(p == 4 && lead.month == 5) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-" }
+      if(p == 3 && lead.month == 6) { cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"} # cluster1.name <- "Blocking"; cluster4.name <- "NAO+"
+
+      # regimes.sim # for the debug
+
+      ### forecast month: October
+      #if(p == 4 && lead.month == 6) { cluster1.name <- "Atl.Ridge"; cluster2.name <- "Blocking"; cluster3.name <- "NAO+"; cluster4.name <- "NAO-"}
+      #if(p == 5 && lead.month == 5) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"}
+      #if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"}
+      #if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+      #if(p == 9 && lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"}
+
+      if(p == 10 && lead.month == 0) { cluster1.name <- "Blocking"; cluster2.name <- "NAO+"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO-"}
+      if(p == 9 
&& lead.month == 1) { cluster1.name <- "NAO-"; cluster2.name <- "Blocking"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 8 && lead.month == 2) { cluster1.name <- "Blocking"; cluster2.name <- "NAO-"; cluster3.name <- "Atl.Ridge"; cluster4.name <- "NAO+"} + if(p == 7 && lead.month == 3) { cluster1.name <- "NAO-"; cluster2.name <- "Atl.Ridge"; cluster3.name <- "Blocking"; cluster4.name <- "NAO+"} + if(p == 6 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster3.name <- "NAO-"; cluster4.name <- "Atl.Ridge"} + + ### forecast month: November + #if(p == 6 && lead.month == 5) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 7 && lead.month == 4) { cluster1.name <- "NAO+"; cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 8 && lead.month == 3) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + #if(p == 9 && lead.month == 2) { cluster2.name <- "Atl.Ridge"; cluster3.name <- "NAO+"; cluster4.name <- "Blocking"} + #if(p == 10 && lead.month == 1) { cluster2.name <- "Blocking"; cluster4.name <- "Atl.Ridge"} + + regimes.sim2 <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # Simulated regimes after the manual correction + #regimes.obs <- c(cluster1.name, cluster2.name, cluster3.name, cluster4.name) # already defined above, included only as reference + + order.sim <- match(regimes.sim2, orden) + order.obs <- match(regimes.obs, orden) + + clusters.sim <- list(cluster1.sim, cluster2.sim, cluster3.sim, cluster4.sim) + clusters.obs <- list(pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean) + + # recompute the spatial correlations, because they might have changed because of the manual corrections above: + spat.cor1 <- cor(as.vector(clusters.sim[[which(order.sim == 1)]]), as.vector(clusters.obs[[which(order.obs == 1)]])) + spat.cor2 <- cor(as.vector(clusters.sim[[which(order.sim == 2)]]), as.vector(clusters.obs[[which(order.obs == 2)]])) + spat.cor3 <- cor(as.vector(clusters.sim[[which(order.sim == 3)]]), as.vector(clusters.obs[[which(order.obs == 3)]])) + spat.cor4 <- cor(as.vector(clusters.sim[[which(order.sim == 4)]]), as.vector(clusters.obs[[which(order.obs == 4)]])) + + if(composition == 'impact' && impact.data == TRUE) { + clusters.sim.imp <- list(cluster1.sim.imp, cluster2.sim.imp, cluster3.sim.imp, cluster4.sim.imp) + #clusters.sim.imp.pv <- list(cluster1.sim.imp.pv, cluster2.sim.imp.pv, cluster3.sim.imp.pv, cluster4.sim.imp.pv) + + clusters.obs.imp <- list(varwr1mean, varwr2mean, varwr3mean, varwr4mean) + #clusters.obs.imp.pv <- list(pvalue1, pvalue2, pvalue3, pvalue4) + + cor.imp1 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 1)]]), as.vector(clusters.obs.imp[[which(order.obs == 1)]])) + cor.imp2 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 2)]]), as.vector(clusters.obs.imp[[which(order.obs == 2)]])) + cor.imp3 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 3)]]), as.vector(clusters.obs.imp[[which(order.obs == 3)]])) + cor.imp4 <- cor(as.vector(clusters.sim.imp[[which(order.sim == 4)]]), as.vector(clusters.obs.imp[[which(order.obs == 4)]])) + + } + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_","psl",".RData")) # reload forecast cluster time series and mean psl data + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".RData")) # reload mean var data for impact maps + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate 
original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(!identical(period.orig, period)) period <- period.orig + if(p.orig != p) p <- p.orig + if(identical(rev(lat),lat.forecast)) lat <- rev(lat) # ripristinate right lat order + if(work.dir != work.dir.orig) work.dir <- work.dir.orig # ripristinate original values of this script + + } # close for on 'forecast.name' + + if(fields.name == rean.name || (fields.name == forecast.name && n.map == 7)){ + if(save.names){ + save(cluster1.name, cluster2.name, cluster3.name, cluster4.name, file=paste0(rean.dir,"/",fields.name,"_", my.period[p],"_","ClusterNames",".RData")) + + } else { # in this case, we load the cluster names from the file already saved, if regimes come from a reanalysis: + ClusterName.file <- paste0(rean.dir,"/",rean.name,"_", my.period[p],"_","ClusterNames",".RData") + if(!file.exists(ClusterName.file)) stop(paste0("file: ",ClusterName.file," missing")) # check if file exists or not + load(ClusterName.file) # load cluster names + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + } + } + + # assign to each cluster its regime: + #if(ordering == FALSE && (fields.name == rean.name || (fields.name == forecast.name && n.map == 7))){ + if(ordering == FALSE){ + cluster1 <- 1; cluster2 <- 2; cluster3 <- 3; cluster4 <- 4 # same as: cluster1.name=orden[1], cluster2.name=orden[2], cluster3.name=orden[3], etc. + } else { + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + } + + assign(paste0("map",cluster1), pslwr1mean) + assign(paste0("map",cluster2), pslwr2mean) + assign(paste0("map",cluster3), pslwr3mean) + assign(paste0("map",cluster4), pslwr4mean) + + assign(paste0("fre",cluster1), wr1y) + assign(paste0("fre",cluster2), wr2y) + assign(paste0("fre",cluster3), wr3y) + assign(paste0("fre",cluster4), wr4y) + + #assign(paste0("withinss",cluster1), my.cluster[[p]]$withinss[1]/my.cluster[[p]]$size[1]) + #assign(paste0("withinss",cluster2), my.cluster[[p]]$withinss[2]/my.cluster[[p]]$size[2]) + #assign(paste0("withinss",cluster3), my.cluster[[p]]$withinss[3]/my.cluster[[p]]$size[3]) + #assign(paste0("withinss",cluster4), my.cluster[[p]]$withinss[4]/my.cluster[[p]]$size[4]) + + if(impact.data == TRUE && (composition == "simple" || composition == "single.impact" || composition == 'impact' || composition == 'edpr')){ + assign(paste0("imp",cluster1), varwr1mean) + assign(paste0("imp",cluster2), varwr2mean) + assign(paste0("imp",cluster3), varwr3mean) + assign(paste0("imp",cluster4), varwr4mean) + + assign(paste0("sig",cluster1), pvalue1) + assign(paste0("sig",cluster2), pvalue2) + assign(paste0("sig",cluster3), pvalue3) + assign(paste0("sig",cluster4), pvalue4) + + if(fields.name == rean.name) { + assign(paste0("impRel",cluster1), varwr1meanRel) + assign(paste0("impRel",cluster2), varwr2meanRel) + assign(paste0("impRel",cluster3), varwr3meanRel) + assign(paste0("impRel",cluster4), varwr4meanRel) + } + + } + + if(fields.name == forecast.name && n.map < 7){ + assign(paste0("freMax",cluster1), wr1yMax) + assign(paste0("freMax",cluster2), wr2yMax) + assign(paste0("freMax",cluster3), wr3yMax) + assign(paste0("freMax",cluster4), wr4yMax) + + 
assign(paste0("freMin",cluster1), wr1yMin) + assign(paste0("freMin",cluster2), wr2yMin) + assign(paste0("freMin",cluster3), wr3yMin) + assign(paste0("freMin",cluster4), wr4yMin) + + #assign(paste0("spatial.cor",cluster1), spat.cor1) + #assign(paste0("spatial.cor",cluster2), spat.cor2) + #assign(paste0("spatial.cor",cluster3), spat.cor3) + #assign(paste0("spatial.cor",cluster4), spat.cor4) + + assign(paste0("persist",cluster1), pers1) + assign(paste0("persist",cluster2), pers2) + assign(paste0("persist",cluster3), pers3) + assign(paste0("persist",cluster4), pers4) + + clusters.all <- c(cluster1, cluster2, cluster3, cluster4) + ordered.sequence <- c(which(clusters.all == 1), which(clusters.all == 2), which(clusters.all == 3), which(clusters.all == 4)) + + assign(paste0("transSim",cluster1), unname(trans.sim[1,ordered.sequence])) + assign(paste0("transSim",cluster2), unname(trans.sim[2,ordered.sequence])) + assign(paste0("transSim",cluster3), unname(trans.sim[3,ordered.sequence])) + assign(paste0("transSim",cluster4), unname(trans.sim[4,ordered.sequence])) + + cluster1.obs <- which(orden == regimes.obs[1]) + cluster2.obs <- which(orden == regimes.obs[2]) + cluster3.obs <- which(orden == regimes.obs[3]) + cluster4.obs <- which(orden == regimes.obs[4]) + + clusters.all.obs <- c(cluster1.obs, cluster2.obs, cluster3.obs, cluster4.obs) + ordered.sequence.obs <- c(which(clusters.all.obs == 1), which(clusters.all.obs == 2), which(clusters.all.obs == 3), which(clusters.all.obs == 4)) + + assign(paste0("freObs",cluster1.obs), wr1y.obs) + assign(paste0("freObs",cluster2.obs), wr2y.obs) + assign(paste0("freObs",cluster3.obs), wr3y.obs) + assign(paste0("freObs",cluster4.obs), wr4y.obs) + + assign(paste0("persistObs",cluster1.obs), persObs1) + assign(paste0("persistObs",cluster2.obs), persObs2) + assign(paste0("persistObs",cluster3.obs), persObs3) + assign(paste0("persistObs",cluster4.obs), persObs4) + + assign(paste0("transObs",cluster1.obs), unname(trans.obs[1,ordered.sequence.obs])) + assign(paste0("transObs",cluster2.obs), unname(trans.obs[2,ordered.sequence.obs])) + assign(paste0("transObs",cluster3.obs), unname(trans.obs[3,ordered.sequence.obs])) + assign(paste0("transObs",cluster4.obs), unname(trans.obs[4,ordered.sequence.obs])) + + } + + # position of long values of Europe only (without the Atlantic Sea and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict area to continental europe only + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + #my.brks <- c(-300,-199:200,300) # % Mean anomaly of a WR + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + #my.cols <- colorRampPalette((brewer.pal(9,"PuBu")))(length(my.brks)-1) + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-21,-1,2),0,seq(1,21,2)) # % Mean anomaly of a WR + # my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- 
my.cols[floor(length(my.cols)/2)+1] <- "white" # add white in the middle + + # breaks and colors of the impact maps (valid both for Wind speed anomalies and Temperature anomalies): + #my.brks.var <- c(-20,seq(-10,-3,1),seq(-2.9,2.9,0.1),seq(3,10,1),20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,-2,-1.5,-1,-0.5,-0.2,0.2,0.5,1,1.5,2,20) # % Mean anomaly of a WR + #my.brks.var <- c(-20,seq(-3,3,0.5),20) # % Mean anomaly of a WR + if(var.name[var.num] == "sfcWind") { + my.brks.var <- seq(-3,3,0.5) + values.to.plot2 <- c(-3,-2,-1,0,1,2,3) + } + if(var.name[var.num] == "tas") { + my.brks.var <- seq(-5,5,1) + values.to.plot2 <- my.brks.var #c(-5,-3,-1,0,1,3,5) + } + + #my.cols <- colorRampPalette(as.character(read.csv("/home/Earth/vtorralb/rgbhex.csv",header=F)[,1]))(length(my.brks)-1) # blue--yellow-red colors + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + #my.cols.var[floor(length(my.cols.var)/2)] <- my.cols.var[floor(length(my.cols.var)/2)+1] <- "white" + + freq.max=100 + if(fields.name == forecast.name){ + pos.years.present <- match(year.start:year.end, years) + years.missing <- which(is.na(pos.years.present)) + fre1.NA <- fre2.NA <- fre3.NA <- fre4.NA <- freMax1.NA <- freMax2.NA <- freMax3.NA <- freMax4.NA <- freMin1.NA <- freMin2.NA <- freMin3.NA <- freMin4.NA <- c() + k <- 0 + for(y in year.start:year.end) { + if(y %in% (years.missing + year.start - 1)) { + fre1.NA <- c(fre1.NA,NA); fre2.NA <- c(fre2.NA,NA); fre3.NA <- c(fre3.NA,NA); fre4.NA <- c(fre4.NA,NA) + freMax1.NA <- c(freMax1.NA,NA); freMax2.NA <- c(freMax2.NA,NA); freMax3.NA <- c(freMax3.NA,NA); freMax4.NA <- c(freMax4.NA,NA) + freMin1.NA <- c(freMin1.NA,NA); freMin2.NA <- c(freMin2.NA,NA); freMin3.NA <- c(freMin3.NA,NA); freMin4.NA <- c(freMin4.NA,NA) + k=k+1 + } else { + fre1.NA <- c(fre1.NA,fre1[y - year.start + 1 - k]); fre2.NA <- c(fre2.NA,fre2[y - year.start + 1 - k]) + fre3.NA <- c(fre3.NA,fre3[y - year.start + 1 - k]); fre4.NA <- c(fre4.NA,fre4[y - year.start + 1 - k]) + freMax1.NA <- c(freMax1.NA,freMax1[y - year.start + 1 - k]); freMax2.NA <- c(freMax2.NA,freMax2[y - year.start + 1 - k]) + freMax3.NA <- c(freMax3.NA,freMax3[y - year.start + 1 - k]); freMax4.NA <- c(freMax4.NA,freMax4[y - year.start + 1 - k]) + freMin1.NA <- c(freMin1.NA,freMin1[y - year.start + 1 - k]); freMin2.NA <- c(freMin2.NA,freMin2[y - year.start + 1 - k]) + freMin3.NA <- c(freMin3.NA,freMin3[y - year.start + 1 - k]); freMin4.NA <- c(freMin4.NA,freMin4[y - year.start + 1 - k]) + } + } + + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + + } else { + fre1.NA <- fre1; fre2.NA <- fre2; fre3.NA <- fre3; fre4.NA <- fre4 + } + + + # Visualize the composition with the 3 graphs for all the 4 regimes: its pressure fields, its impact on var and its frequency series: + if(composition == "simple"){ + + if(!as.pdf && fields.name == rean.name) png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],".png"),width=3000,height=3700) + if(!as.pdf && fields.name == forecast.name) png(filename=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_",var.name[var.num],".png"),width=3000,height=3700) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + if(fields.name == forecast.name) {forecast.title <- paste0(" startdate and 
",lead.month," forecast month ")} else {forecast.title <- ""} + mtext(paste0(fields.name, " Weather Regimes for ", my.period[p], forecast.title," (",year.start,"-",year.end,")"), cex=5.5, font=2) + + # Centroid maps: + map.xpos <- 0 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + ## # for the debug: + ## obs <- read.table("/scratch/Earth/ncortesi/RESILIENCE/Regimes/NAO_series.txt") + ## mes <- p + ## fre.obs <- obs$V3[seq(mes,12*35,12)] # get the ovserved freuency of the NAO + ## #plot(1981:2015,fre.obs,type="l") + ## #lines(1981:2015,fre1,type="l",col="red") + ## #lines(1981:2015,fre2,type="l",col="blue") + ## #lines(1981:2015,fre4,type="l",col="green") + ## cor1 <- cor(fre.obs, fre1) + ## cor2 <- cor(fre.obs, fre2) + ## cor3 <- cor(fre.obs, fre3) + ## cor4 <- cor(fre.obs, fre4) + ## round(c(cor1,cor2,cor3,cor4),2) + + # Legend centroid maps: + legend1.xpos <- 0.10 + legend1.width <- 0.30 + legend1.cex <- 2 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.719, 0.744), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset) + if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} + + # Subtitle centroid maps: + map.title.xpos <- 0.76 + map.title.width <- 0.2 + par(fig=c(map.title.xpos, map.title.xpos + 
map.title.width, 0.728, 0.729), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.494, 0.495), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.260, 0.261), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, 0.026, 0.027), new=TRUE) + mtext("year", cex=3) + + # Title Centroid Maps: + title1.xpos <- 0.02 + title1.width <- 0.4 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9305, 0.9355), new=TRUE) + mtext(paste0(regime.title[1],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.6955, 0.7005), new=TRUE) + mtext(paste0(regime.title[2],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.4605, 0.4655), new=TRUE) + mtext(paste0(regime.title[3],": ", psl.name, " Anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.2255, 0.2305), new=TRUE) + mtext(paste0(regime.title[4],": ", psl.name, " Anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0.47 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.745, 0.925), new=TRUE) + PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.51, 0.69), new=TRUE) + PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.275, 0.455), new=TRUE) + PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, 0.04, 0.22), new=TRUE) + PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0.42 + title2.width <- 0.35 + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime.title[1], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime.title[2], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime.title[3], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, 0.223, 0.227), new=TRUE) + mtext(paste0(regime.title[4], ": Impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.48 + legend2.width <- 0.25 + legend2.cex <- 1.8 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.719 + 0.001, 0.744), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" 
",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.485, 0.508), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.250, 0.273), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.015, 0.038), new=TRUE) + ColorBar3(my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, 0.745, 0.915), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.510,0.680), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.275,0.445), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width,0.040,0.210), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.928, 0.933), new=TRUE) + mtext(paste0(regime.title[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! 
+ par(fig=c(title3.xpos, title3.xpos + title3.width, 0.693, 0.698), new=TRUE) + mtext(paste0(regime.title[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.458, 0.463), new=TRUE) + mtext(paste0(regime.title[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, 0.223, 0.228), new=TRUE) + mtext(paste0(regime.title[4], " Frequency"), font=2, cex=4) + + # % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.92, 0.93), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.59, 0.70), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.45, 0.46), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.22, 0.23), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.9 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=4.5) + } + + # add corr between obs.time series and ensemble mean time series on Frequency plots: + if(fields.name == forecast.name){ + symbol.xpos <- 0.76 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.908, 0.918), new=TRUE) + sp.cor1 <- round(cor(fre1.NA,freObs1, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor1), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.673, 0.683), new=TRUE) + sp.cor2 <- round(cor(fre2.NA,freObs2, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor2), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.44, 0.45), new=TRUE) + sp.cor3 <- round(cor(fre3.NA,freObs3, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor3), cex=4.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, 0.203, 0.213), new=TRUE) + sp.cor4 <- round(cor(fre4.NA,freObs4, use="complete.obs"),2) + mtext(paste0("r= ",sp.cor4), cex=4.5) + } + + if(!as.pdf) dev.off() # for saving 4 png + + } # close if on: composition == 'simple' + + + if(composition == "edpr"){ + + ## adjust color legends to include triangles to the extremities increasing by two the number of intervals: + my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--red colors + + ## same but for SLP: + my.brks <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" ## add white in the middle + + fileoutput <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr.png") + + png(filename=fileoutput,width=3000,height=3700) + + plot.new() + + y1 <- 0.10 + y3 <- 0.315 + y5 <- 0.53 + y7 <- 0.745 + y.width <- 0.18 + + y2 <- y1 + y.width; y4 <- y3 + y.width; y6 <- y5 + y.width; y8 <- y7 + y.width + yt1 <- y2+0.003; yt3 <- y4+0.003; yt5 <- 
y6+0.003; yt7 <- y8+0.003 + yt2 <- yt1 + 0.004; yt4 <- yt3 + 0.005; yt6 <- yt5 + 0.005; yt8 <- yt7 + 0.005 + + ## Centroid maps: + map.xpos <- 0.27 + map.width <- 0.46 + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE) + PlotEquiMap2(map1, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE) + PlotEquiMap2(map2, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE) + PlotEquiMap2(map3, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE) + PlotEquiMap2(map4, lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F", continents.col="black") + + # Legend centroid maps: + legend1.xpos <- 0.30 + legend1.width <- 0.40 + legend1.cex <- 3 + my.subset <- match(values.to.plot, my.brks) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0.045, 0.085), new=TRUE) + ##ColorBar3(my.brks, cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, cex=legend1.cex, subset=my.subset) + ColorBar(my.brks[2:(l(my.brks)-1)], cols=my.cols[2:(length(my.cols)-1)], vert=FALSE, label_scale=legend1.cex, bar_limits=c(my.brks[2], my.brks[l(my.brks)-1]), col_inf=my.cols[1], col_sup=my.cols[l(my.cols)], subsample=1) + + if(psl=="g500") {mtext(side=4," m", cex=2.5, las=1)} else {mtext(side=4," hPa", cex=legend1.cex, las=1)} ## las=1 is to display in horizontal instead of vert + + # Title Centroid Maps: + title1.xpos <- 0.3 + title1.width <- 0.44 + par(fig=c(title1.xpos, title1.xpos + title1.width, yt7+0.0025, yt7+0.0075), new=TRUE) + mtext(paste0(regime.title[1]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt5+0.0025, yt5+0.0075), new=TRUE) + mtext(paste0(regime.title[2]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt3+0.0025, yt3+0.0075), new=TRUE) + mtext(paste0(regime.title[3]," ", psl.name, " anomaly"), font=2, cex=4) + par(fig=c(title1.xpos, title1.xpos + title1.width, yt1+0.0025, yt1+0.0075), new=TRUE) + mtext(paste0(regime.title[4]," ", psl.name, " anomaly"), font=2, cex=4) + + # Impact maps: + if(impact.data == TRUE ){ + impact.xpos <- 0 + impact.width <- 0.26 + par(fig=c(impact.xpos, impact.xpos + impact.width, y7, y8), new=TRUE) + PlotEquiMap2(impRel1[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y5, y6), new=TRUE) + PlotEquiMap2(impRel2[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y3, y4), new=TRUE) + PlotEquiMap2(impRel3[,EU], lon[EU], lat, 
filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5, continents.col="black") + par(fig=c(impact.xpos, impact.xpos + impact.width, y1, y2), new=TRUE) + PlotEquiMap2(impRel4[,EU], lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5, continents.col="black") + + + # Title impact maps: + title2.xpos <- 0 + title2.width <- 0.26 + par(fig=c(title2.xpos, title2.xpos + title2.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " impact on ", var.name.full[var.num]), font=2, cex=4) + par(fig=c(title2.xpos, title2.xpos + title2.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " impact on ", var.name.full[var.num]), font=2, cex=4) + + # Legend: + legend2.xpos <- 0.01 + legend2.width <- 0.25 + legend2.cex <- 3 + + my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend2.xpos, legend2.xpos + legend2.width, 0.045, 0.085), new=TRUE) + + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) + ##mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex, las=1) + mtext(side=4,"%", cex=legend2.cex, las=1) + + } + + # Frequency plots: + barplot.xpos <- 0.75 + barplot.width <- 0.25 + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y7, y8-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y5, y6-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y3, y4-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + par(fig=c(barplot.xpos, barplot.xpos + barplot.width, y1, y2-0.01), new=TRUE) + if(fields.name == rean.name) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + if(fields.name == forecast.name) 
barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + + # Title Frequency maps: + title3.xpos <- 0.75 + title3.width <-0.25 + par(fig=c(title3.xpos, title3.xpos + title3.width, yt7, yt8), new=TRUE) + mtext(paste0(regime.title[1], " Frequency"), font=2, cex=4) # paste0 doesn't work inside an expression! + par(fig=c(title3.xpos, title3.xpos + title3.width, yt5, yt6), new=TRUE) + mtext(paste0(regime.title[2], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, yt3, yt4), new=TRUE) + mtext(paste0(regime.title[3], " Frequency"), font=2, cex=4) + par(fig=c(title3.xpos, title3.xpos + title3.width, yt1, yt2), new=TRUE) + mtext(paste0(regime.title[4], " Frequency"), font=2, cex=4) + + ## % on Frequency plots: + symbol.xpos <- 0.735 + symbol.width <- 0.01 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y4+0.425, y4+0.425+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.39, y3+0.39+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y2+0.21, y2+0.21+0.01), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.17, y1+0.17+0.01), new=TRUE) + mtext("%", cex=3.3) + + ## mean frequency on Frequency plots: + if(mean.freq == TRUE){ + symbol.xpos <- 0.83 + symbol.width <- 0.1 + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y7+0.163, y7+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y5+0.163, y5+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y3+0.165, y3+0.165+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(symbol.xpos, symbol.xpos + symbol.width, y1+0.163, y1+0.163+0.01), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + } + + + ## Subtitle frequency maps: + map.title.xpos <- 0.96 + map.title.width <- 0.04 + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y7-0.012, y7-0.012+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y5-0.012, y5-0.012+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y3-0.012, y3-0.012+0.001), new=TRUE) + mtext("year", cex=3) + par(fig=c(map.title.xpos, map.title.xpos + map.title.width, y1-0.012, y1-0.012+0.001), new=TRUE) + mtext("year", cex=3) + + if(!as.pdf) dev.off() # for saving 4 png + + # same image formatted for the catalogue: + fileoutput2 <- paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_edpr_fig2cat.png") + + system(paste0("~/scripts/fig2catalog.sh -s 0.8 -r 150 -m 150 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies and frequencies \n",month.name[p]," / ",year.start,"-",year.end,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2)) + + system(paste0("rm ", fileoutput)) + + + } # close if on: composition == 'edpr' + + + # Visualize the composition with the regime anomalies for a selected forecasted month: + if(composition == "psl"){ + # Centroid maps: + n.map <- n.map + 1 # n.maps starts 
from 0 + map.width <- 0.115 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + } # close if on composition == 'psl' + + + if(composition == "psl.rean" || composition == "psl.rean.unordered"){ + ## matrix correlation with DJF regimes: + #cluster1.monthly <- pslwr1mean; cluster2.monthly <- pslwr2mean; cluster3.monthly <- pslwr3mean; cluster4.monthly <- pslwr4mean + #cluster1.name.monthly <- cluster1.name; cluster2.name.monthly <- cluster2.name; cluster3.name.monthly <- cluster3.name; cluster4.name.monthly <- cluster4.name + assign(paste0("clusterMax", which(orden == cluster1.name), ".monthly"), pslwr1mean) + assign(paste0("clusterMax", which(orden == cluster2.name), ".monthly"), pslwr2mean) + assign(paste0("clusterMax", which(orden == cluster3.name), ".monthly"), pslwr3mean) + assign(paste0("clusterMax", which(orden == cluster4.name), ".monthly"), pslwr4mean) + + ## cluster.name.monthly <- c(cluster1.name.monthly, cluster2.name.monthly, cluster3.name.monthly, cluster4.name.monthly) + ## max1 <- which(cluster.name.monthly == orden[1]) # get which is the monthly cluster with the highest explained variance (by default it is associated to NAO+) + ## max2 <- which(cluster.name.monthly == orden[2]) # get the monthly cluster with the second highest variance + ## max3 <- which(cluster.name.monthly == orden[3]) # ... + ## max4 <- which(cluster.name.monthly == orden[4]) # ... 
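+ ## Illustrative, commented sketch (not executed; 'match.regime' is a hypothetical
+ ## helper, not part of this script): the matching of monthly clusters to the
+ ## reference regimes can also be done by pattern correlation, assigning each
+ ## centroid to the reference regime it correlates best with; this is the idea
+ ## behind the cluster.corr matrix computed below:
+ ## match.regime <- function(centroid, refs) which.max(sapply(refs, function(r) cor(as.vector(centroid), as.vector(r))))
+ ## e.g. match.regime(pslwr1mean, list(psl.ordered1, psl.ordered2, psl.ordered3, psl.ordered4))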
+
+ ## max.seq <- c(max1, max2, max3, max4)
+
+ ## Load DJF data:
+ rean.dir.DJF <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/53_JRA55_seasonal_1981-2016_LOESS_filter_lat_corr"
+
+ load(file=paste0(rean.dir.DJF,"/",rean.name,"_",my.period[13],"_","psl",".RData")) # load the mean DJF slp data from the same reanalysis
+ load(paste0(rean.dir.DJF,"/",rean.name,"_", my.period[13],"_","ClusterNames",".RData")) # load also the reanalysis DJF regime names
+
+ if(var.num != var.num.orig) var.num <- var.num.orig # restore the original values of this script (necessary after loading regime data)
+ if(fields.name != fields.name.orig) fields.name <- fields.name.orig # restore the original values of this script
+ if(work.dir != work.dir.orig) work.dir <- work.dir.orig # restore the original values of this script
+ if(p.orig != p) p <- p.orig
+
+ cluster1 <- which(orden == cluster1.name) # clusters for DJF
+ cluster2 <- which(orden == cluster2.name)
+ cluster3 <- which(orden == cluster3.name)
+ cluster4 <- which(orden == cluster4.name)
+
+ assign(paste0("psl.ordered",cluster1), pslwr1mean) # psl for DJF
+ assign(paste0("psl.ordered",cluster2), pslwr2mean)
+ assign(paste0("psl.ordered",cluster3), pslwr3mean)
+ assign(paste0("psl.ordered",cluster4), pslwr4mean)
+
+ # pattern correlation of each monthly cluster (rows) with each reference DJF regime (columns):
+ cluster.corr <- array(NA, c(4,4), dimnames=list(c("cl1","cl2","cl3","cl4"), orden))
+ for(i in 1:4) {
+   for(j in 1:4) {
+     cluster.corr[i,j] <- cor(as.vector(get(paste0("clusterMax",i,".monthly"))), as.vector(get(paste0("psl.ordered",j))))
+   }
+ }
+
+ # Centroid maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.08
+ map.xpos <- map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+ print(paste0("map.xpos= ", map.xpos))
+
+ par(fig=c(0, 1, 0, 1), new=TRUE)
+ # reset par to its default values, because drawing with PlotEquiMap() alters some par values:
+ if(n.map == 1) { op <- par(no.readonly = TRUE) } else { par(op) }
+
+ text.cex <- 2
+ text.ypos <- 1.03
+ text.xmod <- 0.007 * (n.map - 1)
+ text.xpos <- map.xpos + text.xmod - 0.02
+ text.width <- 0.015
+ text(x=text.xpos - text.width, y=text.ypos - 0.02, labels="cl1", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos - 0.04, labels="cl2", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos - 0.06, labels="cl3", cex=text.cex)
+ text(x=text.xpos - text.width, y=text.ypos -
0.08, labels="cl4", cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos + 0.00, labels="NAO+", cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos + 0.00, labels="NAO-", cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos + 0.00, labels="BLO", cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos + 0.00, labels="ATL", cex=text.cex) + + text(x=text.xpos + 0*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,1], cex=text.cex) + text(x=text.xpos + 0*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,1], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,2], cex=text.cex) + text(x=text.xpos + 1*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,2], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,3], cex=text.cex) + text(x=text.xpos + 2*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,3], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.02, labels=round(cluster.corr,2)[1,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.04, labels=round(cluster.corr,2)[2,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.06, labels=round(cluster.corr,2)[3,4], cex=text.cex) + text(x=text.xpos + 3*text.width, y=text.ypos - 0.08, labels=round(cluster.corr,2)[4,4], cex=text.cex) + + ## Centroid maps: + ## (note that mapX == clusterMaxX.monthly, X = 1, ..., 4 by default) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, 
contours.lty="F1FF1F")
+
+ } # close if on psl.rean or on psl.rean.unordered
+
+
+ # Visualize the composition of the impact maps for a selected forecasted month:
+ if(composition == "impact"){
+ # Impact maps:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE)
+ #PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE)
+ #PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+
+ # Sheet subtitles:
+ par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE)
+ if(n.map < 8) {
+ mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2)
+ } else{
+ mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2)
+ }
+
+ } # close if on composition == 'impact'
+
+ # Visualize the 2x2 composition of the four impact maps for a selected reanalysis or forecasted month:
+ if(composition == "single.impact"){
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_composition.png"),width=2000,height=2000)
+
+ plot.new()
+
+ par(fig=c(0, 0.5, 0.95, 0.988), new=TRUE)
+ mtext("NAO+",cex=5)
+ par(fig=c(0, 0.5, 0.52, 0.95), new=TRUE)
+ PlotEquiMap(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig1[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.5, 1, 0.93, 0.96), new=TRUE)
+ mtext("NAO-",cex=5)
+ par(fig=c(0.5, 1, 0.52, 0.95), new=TRUE)
+ PlotEquiMap(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig2[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0, 0.5, 0.44, 0.47), new=TRUE)
+ mtext("Blocking",cex=5)
+ par(fig=c(0, 0.5, 0.08, 0.46), new=TRUE)
+ PlotEquiMap(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig3[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.5, 1, 0.44, 0.47), new=TRUE)
+ mtext("Atlantic Ridge",cex=5)
+ par(fig=c(0.5, 1, 0.08, 0.46), new=TRUE)
+ PlotEquiMap(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=sig4[,EU] < 0.05, dot_size=2, axes_label_scale=2.5)
+
+ par(fig=c(0.03, 0.96, 0.02, 0.08), new=TRUE)
+ ColorBar2(my.brks.var, cols=my.cols.var, vert=FALSE, cex=3, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var, label.dist=3)
+
+ par(fig=c(0.965, 0.99, 0, 0.026), new=TRUE)
+ mtext("%",cex=3)
+
+ dev.off()
+
+ # format it manually from the bash shell (run it from your workstation until the 'identify' command of ImageMagick becomes available on the cluster):
+ #sh ~/scripts/fig2catalog.sh -x 20 -t 'NCEP / 10-m wind speed / Regime impact \nOctober / 1981-2016' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: NCEP/NCAR reanalysis' NCEP_October_sfcWind_impact_composition.png NCEP_October_sfcWind_impact_composition_catalogue.png
+
+
+ ### to plot the impact map of all regimes on a particular month:
+ #imp.oct <- (imp1*3.2 + imp2*38.7 + imp3*51.6 + imp4*6.4)/100
+ year.test <- 2016
+ pos.year.test <- year.test - year.start + 1
+ imp.test <- imp1*fre1.NA[pos.year.test] + imp2*fre2.NA[pos.year.test] + imp3*fre3.NA[pos.year.test] + imp4*fre4.NA[pos.year.test]
+ #imp.test <- imp1*fre1.NA[pos.year.test] + imp3*(fre3.NA[pos.year.test]+0.032) + imp4*(fre4.NA[pos.year.test]+0.032)
+ par(fig=c(0, 1, 0.05, 1), new=TRUE)
+ PlotEquiMap2(rescale(imp.test[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5)
+
+ # vector with the frequency of the WRs in the chosen month and year:
+ wt.test.freq <- c(fre1.NA[pos.year.test],fre2.NA[pos.year.test],fre3.NA[pos.year.test],fre4.NA[pos.year.test])
+
+ ## # or save them as individual maps:
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO+.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp1[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig1[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_NAO-.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp2[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig2[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Blocking.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp3[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig3[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+
+ ## png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_impact_Atl_Ridge.png"),width=3000,height=3700)
+ ## PlotEquiMap2(rescale(imp4[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents = FALSE, brks=my.brks.var, cols=my.cols.var, intxlon=10, intylat=10, drawleg=F, dots=t(sig4[,EU] < 0.05), cex.lab=1.5)
+ ## dev.off()
+ }
+
+ # Save the psl pattern of each of the four clusters as an individual map for a selected month:
+ if(composition == "single.psl"){
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster1.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss1/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster2.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss2/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster3.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss3/1000000,2)))
+ dev.off()
+
+ png(filename=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_",var.name[var.num],"_pattern_cluster4.png"),width=2000,height=1400, res=300)
+ PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=0.5, xlabel.dist=0, contours.lty="F1FF1F", toptitle=paste0(my.period[p]," WithinSS=", round(withinss4/1000000,2)))
+ dev.off()
+
+ # Legend centroid maps:
+ #my.subset <- match(values.to.plot, my.brks)
+ #ColorBar3(my.brks, cols=my.cols, vert=FALSE, cex=legend1.cex, subset=my.subset)
+ #mtext(side=4," hPa", cex=legend1.cex)
+
+ }
+
+ # Visualize the composition with the interannual frequencies for a selected forecasted month:
+ if(composition == "fre"){
+ # Frequency barplots:
+ n.map <- n.map + 1 # n.map starts from 0
+ map.width <- 0.115
+ map.xpos <- 0.06 + map.width * (n.map - 1)
+ map.ypos <- 0.08
+ map.height <- 0.19
+ map.ysep <- 0.015
+
+ par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE)
+ if(n.map < 8) barplot.freq.sim2(100*fre1.NA, 100*freMax1.NA, 100*freMin1.NA, 100*freObs1, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2)
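+ # (n.map == 8 is assumed to be the reanalysis column, which has no ensemble spread to draw, hence the plain barplot below)
+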
if(n.map == 8) barplot.freq(100*fre1.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre2.NA, 100*freMax2.NA, 100*freMin2.NA, 100*freObs2, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre2.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre3.NA, 100*freMax3.NA, 100*freMin3.NA, 100*freObs3, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre3.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + if(n.map < 8) if(fields.name == forecast.name) barplot.freq.sim2(100*fre4.NA, 100*freMax4.NA, 100*freMin4.NA, 100*freObs4, year.start, year.end, cex.y=2, cex.x=2, freq.min=-2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0), col.line="gray20", cex.r=3, cex.mean=2, cex.obs=2) + if(n.map == 8) barplot.freq(100*fre4.NA, year.start, year.end, cex.y=2, cex.x=2, freq.max=freq.max, ylab="", mgp=c(3,1.5,0)) + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + if(n.map < 8) { + mtext(paste0(my.month.short[p], " --> ", my.month.short[forecast.month]), cex=5.5, font=2) + } else{ + mtext(paste0(rean.name," ", my.month.short[forecast.month]), cex=5.5, font=2) + } + + # % on Frequency plots: + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 3.9*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 2.9*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 1.9*map.height + 1*map.ysep, map.ypos + 2*map.height + 1*map.ysep), new=TRUE) + mtext("%", cex=3.3) + par(fig=c(map.xpos - 0.006, map.xpos, map.ypos + 0.9*map.height + 0*map.ysep, map.ypos + 1*map.height + 0*map.ysep), new=TRUE) + mtext("%", cex=3.3) + + # mean frequency on Frequency plots: + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre1.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre2.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre3.NA),1),"%"))),cex=3.5) + par(fig=c(map.xpos + 0.02, map.xpos + 0.04, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(bquote(bar(nu) == .(paste0(round(mean(100*fre4.NA),1),"%"))),cex=3.5) + + # add corr between obs.time series and ensemble mean 
time series on Frequency plots: + if(n.map < 8){ + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 3.1*map.height + 3*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor1), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 2.1*map.height + 2*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor2), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 1*map.height + 1*map.ysep, map.ypos + 1.1*map.height + 1*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor3), cex=3.5) + par(fig=c(map.xpos + map.width - 0.04, map.xpos + map.width - 0.02, map.ypos + 0*map.height + 0*map.ysep, map.ypos + 0.1*map.height + 0*map.ysep), new=TRUE) + mtext(paste0("r= ",sp.cor4), cex=3.5) + } + } # close if on composition == 'fre' + + # save indicators for each startdate and forecast time: + # (map1, map2, map3, map4, fre1, fre2, etc. already refer to the regimes in the same order as listed in the 'orden' vector) + if(fields.name == forecast.name) { + if (composition != "impact") { + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } else { # also save impX objects + save(orden, map1, map2, map3, map4, fre1.NA, fre2.NA, fre3.NA, fre4.NA, freObs1, freObs2, freObs3, freObs4, sp.cor1, sp.cor2, sp.cor3, sp.cor4, persist1, persist2, persist3, persist4, persistObs1, persistObs2, persistObs3, persistObs4, spat.cor1, spat.cor2, spat.cor3, spat.cor4, sd.freq.sim1, sd.freq.sim2, sd.freq.sim3, sd.freq.sim4, sd.freq.obs1, sd.freq.obs2, sd.freq.obs3, sd.freq.obs4, transSim1, transSim2, transSim3, transSim4, transObs1, transObs2, transObs3, transObs4, imp1, imp2, imp3, imp4,cor.imp1,cor.imp2,cor.imp3,cor.imp4, file=paste0(work.dir,"/",fields.name,"_", my.period[p],"_leadmonth_",lead.month,"_","ClusterNames",".RData")) + } + } + + } # close 'p' on 'period' + + if((composition == 'simple') && as.pdf) dev.off() # for saving a single pdf with all months/seasons + + } # close 'lead.month' on 'lead.months' + + if(composition == "psl" || composition == "fre" || composition == "impact"){ + # Regime's names: + title1.xpos <- 0.01 + title1.width <- 0.04 + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.7905, 0.7955), new=TRUE) + mtext(paste0(regime.title[1]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.5855, 0.5905), new=TRUE) + mtext(paste0(regime.title[2]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.3805, 0.3955), new=TRUE) + mtext(paste0(regime.title[3]), font=2, cex=5) + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.1755, 0.1805), new=TRUE) + mtext(paste0(regime.title[4]), font=2, cex=5) + + if(composition == "psl") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + ColorBar2(brks=my.brks, cols=my.cols, vert=FALSE, 
cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks), my.labels=my.brks) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + mtext("hPa", cex=legend1.cex*1.3) + } + + if(composition == "impact") { + # Color legend: + legend1.xpos <- 0.10 + legend1.width <- 0.80 + legend1.cex <- 4 + + #my.subset2 <- match(values.to.plot2, my.brks.var) + par(fig=c(legend1.xpos, legend1.xpos + legend1.width, 0, 0.065), new=TRUE) + ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend1.cex, label.dist=2, my.ticks=-0.5 + 1:length(my.brks.var), my.labels=my.brks.var) + par(fig=c(legend1.xpos + legend1.width - 0.02, legend1.xpos + legend1.width + 0.05, 0, 0.02), new=TRUE) # syntax fig=c(xmin, xmax, ymin, ymax) + #ColorBar2(brks=my.brks.var, cols=my.cols.var, vert=FALSE, cex=legend2.cex, subset=my.subset2) + mtext(var.unit[var.num], cex=legend1.cex*1.3) + + } + + dev.off() + + } # close if on composition + + + if(composition == "psl.rean" || composition == "psl.rean.unordered") dev.off() + + print("Finished!") +} # close if on composition != "summary" + + + +#} # close for on forecasts.month + + + +if(composition == "taylor"){ + library("plotrix") + + fields.name="ERA-Interim" + + # choose a reference season from 13 (winter) to 16 (Autumn): + season.ref <- 13 + + # set the first WR classification: + rean.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + + # load data of the reference season of the first classification (this line should be modified to load the chosen season): + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + #load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/18) as 12) but with no lat correction/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + #cluster1.ref <- cluster1.name.period[13] + #cluster2.ref <- cluster2.name.period[13] + #cluster3.ref <- cluster3.name.period[13] + #cluster4.ref <- cluster4.name.period[13] + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + # set a second WR classification (i.e: with running cluster) to contrast with the new one: + #rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/41_ERA-Interim_monthly_1981-2015_LOESS_filter" + rean2.dir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes/44_as_43_but_with_no_lat_correction" + rean2.name <- "ERA-Interim" + + # load data of the reference season of the second classification (this line should be modified to load the chosen season): + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_psl.RData") # load pslwrXmean data + load("/scratch/Earth/ncortesi/RESILIENCE/Regimes/46_as_18_but_with_no_lat_correction_and_with_LOESS/ERA-Interim_Winter_ClusterNames.RData") # load clusterX.name.period + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + + #cluster1.name.ref <- cluster1.name.period[season.ref] + #cluster2.name.ref <- cluster2.name.period[season.ref] + #cluster3.name.ref <- cluster3.name.period[season.ref] + #cluster4.name.ref <- cluster4.name.period[season.ref] + + cluster1.ref <- which(orden == cluster1.name) + cluster2.ref <- which(orden == cluster2.name) + cluster3.ref <- which(orden == cluster3.name) + cluster4.ref <- which(orden == cluster4.name) + + cluster1.ref <- 3 # bypass the previous values because for dataset num.46 the blocking and atlantic regimes were saved swapped! 
+ cluster2.ref <- 2 + cluster3.ref <- 1 + cluster4.ref <- 4 + + assign(paste0("map.old.ref",cluster1.ref), pslwr1mean) # NAO+ + assign(paste0("map.old.ref",cluster2.ref), pslwr2mean) # NAO- + assign(paste0("map.old.ref",cluster3.ref), pslwr3mean) # Blocking + assign(paste0("map.old.ref",cluster4.ref), pslwr4mean) # Atl.ridge + + + # breaks and colors of the geopotential fields: + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + # plot the composition with the regime anomalies of each monthly regimes: + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_source",".png"),width=6000,height=2000) + + plot.new() + + # Sheet title: + par(fig=c(0, 1, 0.94, 0.98), new=TRUE) + mtext(paste0(fields.name, " observed monthly regime anomalies"), cex=5.5, font=2) + + n.map <- 0 + for(p in c(9:12,1:8)){ + p.orig <- p + + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",fields.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, 
filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # Sheet subtitles: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.month.short[p], cex=5.5, font=2) + + } + + # draw the last map to the right of the composition, that with the seasonal anomalies: + n.map <- n.map + 1 # n.maps starts from 0 + map.width <- 0.07 + map.xpos <- 0.06 + map.width * (n.map - 1) + map.ypos <- 0.08 + map.height <- 0.19 + map.ysep <- 0.015 + + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 3*map.height + 3*map.ysep, map.ypos + 4*map.height + 3*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + 2*map.height + 2*map.ysep, map.ypos + 3*map.height + 2*map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos + map.height + map.ysep, map.ypos + 2*map.height + map.ysep), new=TRUE) + PlotEquiMap2(rescale(map.ref3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + par(fig=c(map.xpos, map.xpos + map.width, map.ypos, map.ypos + map.height), new=TRUE) + PlotEquiMap2(rescale(map.ref4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map.ref4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + + # subtitle: + par(fig=c(map.xpos, map.xpos + map.width, 0.86, 0.90), new=TRUE) + mtext(my.period[season.ref], cex=5.5, font=2) + + dev.off() + + # Plot the Taylor diagrams: + + corr.first <- corr.second <- rmse.first <- rmse.second <- array(NA,c(4,12)) + + for(regime in 1:4){ + + png(filename=paste0(rean.dir,"/",rean.name,"_taylor_",orden[regime],".png"), width=1200, height=800) + + for(p in 1:12){ + p.orig <- p + + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1 <- which(orden == cluster1.name) + cluster2 <- which(orden == cluster2.name) + cluster3 <- which(orden == cluster3.name) + cluster4 <- which(orden == cluster4.name) + + assign(paste0("map",cluster1), 
pslwr1mean) # NAO+ + assign(paste0("map",cluster2), pslwr2mean) # NAO- + assign(paste0("map",cluster3), pslwr3mean) # Blocking + assign(paste0("map",cluster4), pslwr4mean) # Atl.ridge + + # load data from the second classification: + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","psl",".RData")) # load cluster names and summarized data + load(file=paste0(rean2.dir,"/",rean2.name,"_",my.period[p],"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + cluster1.old <- which(orden == cluster1.name) + cluster2.old <- which(orden == cluster2.name) + cluster3.old <- which(orden == cluster3.name) + cluster4.old <- which(orden == cluster4.name) + + assign(paste0("map.old",cluster1.old), pslwr1mean) # NAO+ + assign(paste0("map.old",cluster2.old), pslwr2mean) # NAO- + assign(paste0("map.old",cluster3.old), pslwr3mean) # Blocking + assign(paste0("map.old",cluster4.old), pslwr4mean) # Atl.ridge + + add.mod <- ifelse(p == 1, FALSE, TRUE) + main.mod <- ifelse(p == 1, orden[regime],"") + + color.first <- "red" + color.second <- "blue" # "black" + sd.arcs.mod <- c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3) #c(0,0.5,1,1.5) # doesn't work! + if(regime == 1) my.taylor(as.vector(map.ref1),as.vector(map1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 2) my.taylor(as.vector(map.ref2),as.vector(map2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 3) my.taylor(as.vector(map.ref3),as.vector(map3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + if(regime == 4) my.taylor(as.vector(map.ref4),as.vector(map4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.first, main=main.mod) + + # add the points from the second classification: + add.mod=TRUE + #add.mod <- ifelse(p == 1, FALSE, TRUE) + + if(regime == 1) my.taylor(as.vector(map.old.ref1),as.vector(map.old1), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 2) my.taylor(as.vector(map.old.ref2),as.vector(map.old2), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 3) my.taylor(as.vector(map.old.ref3),as.vector(map.old3), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + if(regime == 4) my.taylor(as.vector(map.old.ref4),as.vector(map.old4), normalize=TRUE, add=add.mod, pos.cor=FALSE, sd.arcs=sd.arcs.mod, ref.sd=F, cex.axis=1.7, text.cex=1, my.text=my.month.short[p], col = color.second) + + if(regime == 1) { corr.first[1,p] <- cor(as.vector(map.ref1),as.vector(map1)); rmse.first[1,p] <- RMSE(as.vector(map.ref1),as.vector(map1)) } + if(regime == 2) { 
corr.first[2,p] <- cor(as.vector(map.ref2),as.vector(map2)); rmse.first[2,p] <- RMSE(as.vector(map.ref2),as.vector(map2)) } + if(regime == 3) { corr.first[3,p] <- cor(as.vector(map.ref3),as.vector(map3)); rmse.first[3,p] <- RMSE(as.vector(map.ref3),as.vector(map3)) } + if(regime == 4) { corr.first[4,p] <- cor(as.vector(map.ref4),as.vector(map4)); rmse.first[4,p] <- RMSE(as.vector(map.ref4),as.vector(map4)) } + + if(regime == 1) { corr.second[1,p] <- cor(as.vector(map.old.ref1),as.vector(map.old1)); rmse.second[1,p] <- RMSE(as.vector(map.old.ref1),as.vector(map.old1)) } + if(regime == 2) { corr.second[2,p] <- cor(as.vector(map.old.ref2),as.vector(map.old2)); rmse.second[2,p] <- RMSE(as.vector(map.old.ref2),as.vector(map.old2)) } + if(regime == 3) { corr.second[3,p] <- cor(as.vector(map.old.ref3),as.vector(map.old3)); rmse.second[3,p] <- RMSE(as.vector(map.old.ref3),as.vector(map.old3)) } + if(regime == 4) { corr.second[4,p] <- cor(as.vector(map.old.ref4),as.vector(map.old4)); rmse.second[4,p] <- RMSE(as.vector(map.old.ref4),as.vector(map.old4)) } + + } # close for on 'p' + + dev.off() + + } # close for on 'regime' + + corr.first.mean <- apply(corr.first,1,mean) + corr.second.mean <- apply(corr.second,1,mean) + corr.diff <- corr.second.mean - corr.first.mean + + rmse.first.mean <- apply(rmse.first,1,mean) + rmse.second.mean <- apply(rmse.second,1,mean) + rmse.diff <- rmse.second.mean - rmse.first.mean + +} # close if on composition == "taylor" + + + + + + +# Summary graphs: +if(composition == "summary" && fields.name == forecast.name){ + # array storing correlations in the format: [startdate, leadmonth, regime] + array.diff.sd.freq <- array.sd.freq <- array.cor <- array.sp.cor <- array.freq <- array.diff.freq <- array.rpss <- array.pers <- array.diff.pers <- array(NA,c(12,7,4)) + array.spat.cor <- array.imp <- array.trans.sim1 <- array.trans.sim2 <- array.trans.sim3 <- array.trans.sim4 <- array(NA,c(12,7,4)) + array.trans.diff1 <- array.trans.diff2 <- array.trans.diff3 <- array.trans.diff4 <- array.freq.obs <- array.pers.obs <- array(NA,c(12,7,4)) + array.trans.obs1 <- array.trans.obs2 <- array.trans.obs3 <- array.trans.obs4 <- array(NA,c(12,7,4)) + + for(p in 1:12){ + p.orig <- p + + for(l in 0:6){ + + load(file=paste0(work.dir,"/",fields.name,"_",my.period[p],"_leadmonth_",l,"_","ClusterNames",".RData")) # load cluster names and summarized data + + if(var.num != var.num.orig) var.num <- var.num.orig # ripristinate original values of this script (necessary after loading regime data) + if(fields.name != fields.name.orig) fields.name <- fields.name.orig # ripristinate original values of this script + if(p != p.orig) p <- p.orig + + array.spat.cor[p,1+l, 1] <- spat.cor1 # associates NAO+ to the first triangle (at left) + array.spat.cor[p,1+l, 3] <- spat.cor2 # associates NAO- to the third triangle (at right) + array.spat.cor[p,1+l, 4] <- spat.cor3 # associates Blocking to the fourth triangle (top one) + array.spat.cor[p,1+l, 2] <- spat.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.cor[p,1+l, 1] <- sp.cor1 # associates NAO+ to the first triangle (at left) + array.cor[p,1+l, 3] <- sp.cor2 # associates NAO- to the third triangle (at right) + array.cor[p,1+l, 4] <- sp.cor3 # associates Blocking to the fourth triangle (top one) + array.cor[p,1+l, 2] <- sp.cor4 # associates Atl.Ridge to the second triangle (bottom one) + + array.freq[p,1+l, 1] <- 100*(mean(fre1.NA)) # NAO+ + array.freq[p,1+l, 3] <- 100*(mean(fre2.NA)) # NAO- + array.freq[p,1+l, 4] <- 100*(mean(fre3.NA)) # 
Blocking + array.freq[p,1+l, 2] <- 100*(mean(fre4.NA)) # Atl.Ridge + + array.freq.obs[p,1+l, 1] <- 100*(mean(freObs1)) # NAO+ + array.freq.obs[p,1+l, 3] <- 100*(mean(freObs2)) # NAO- + array.freq.obs[p,1+l, 4] <- 100*(mean(freObs3)) # Blocking + array.freq.obs[p,1+l, 2] <- 100*(mean(freObs4)) # Atl.Ridge + + array.diff.freq[p,1+l, 1] <- 100*(mean(fre1.NA) - mean(freObs1)) # NAO+ + array.diff.freq[p,1+l, 3] <- 100*(mean(fre2.NA) - mean(freObs2)) # NAO- + array.diff.freq[p,1+l, 4] <- 100*(mean(fre3.NA) - mean(freObs3)) # Blocking + array.diff.freq[p,1+l, 2] <- 100*(mean(fre4.NA) - mean(freObs4)) # Atl.Ridge + + array.pers[p,1+l, 1] <- persist1 # NAO+ + array.pers[p,1+l, 3] <- persist2 # NAO- + array.pers[p,1+l, 4] <- persist3 # Blocking + array.pers[p,1+l, 2] <- persist4 # Atl.Ridge + + array.pers.obs[p,1+l, 1] <- persistObs1 # NAO+ + array.pers.obs[p,1+l, 3] <- persistObs2 # NAO- + array.pers.obs[p,1+l, 4] <- persistObs3 # Blocking + array.pers.obs[p,1+l, 2] <- persistObs4 # Atl.Ridge + + array.diff.pers[p,1+l, 1] <- persist1 - persistObs1 # NAO+ + array.diff.pers[p,1+l, 3] <- persist2 - persistObs2 # NAO- + array.diff.pers[p,1+l, 4] <- persist3 - persistObs3 # Blocking + array.diff.pers[p,1+l, 2] <- persist4 - persistObs4 # Atl.Ridge + + array.sd.freq[p,1+l, 1] <- sd.freq.sim1 # NAO+ + array.sd.freq[p,1+l, 3] <- sd.freq.sim2 # NAO- + array.sd.freq[p,1+l, 4] <- sd.freq.sim3 # Blocking + array.sd.freq[p,1+l, 2] <- sd.freq.sim4 # Atl.Ridge + + array.diff.sd.freq[p,1+l, 1] <- sd.freq.sim1 / sd.freq.obs1 # NAO+ + array.diff.sd.freq[p,1+l, 3] <- sd.freq.sim2 / sd.freq.obs2 # NAO- + array.diff.sd.freq[p,1+l, 4] <- sd.freq.sim3 / sd.freq.obs3 # Blocking + array.diff.sd.freq[p,1+l, 2] <- sd.freq.sim4 / sd.freq.obs4 # Atl.Ridge + + array.imp[p,1+l, 1] <- cor.imp1 # NAO+ + array.imp[p,1+l, 3] <- cor.imp2 # NAO- + array.imp[p,1+l, 4] <- cor.imp3 # Blocking + array.imp[p,1+l, 2] <- cor.imp4 # Atl.Ridge + + array.trans.sim1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.sim1[p,1+l, 3] <- 100*transSim1[2] # Transition from NAO+ to NAO- + array.trans.sim1[p,1+l, 4] <- 100*transSim1[3] # Transition from NAO+ to blocking + array.trans.sim1[p,1+l, 2] <- 100*transSim1[4] # Transition from NAO+ to Atl.Ridge + + array.trans.sim2[p,1+l, 1] <- 100*transSim2[1] # Transition from NAO- to NAO+ + array.trans.sim2[p,1+l, 3] <- NA # Transition from NAO- to NAO- + array.trans.sim2[p,1+l, 4] <- 100*transSim2[3] # Transition from NAO- to blocking + array.trans.sim2[p,1+l, 2] <- 100*transSim2[4] # Transition from NAO- to Atl.Ridge + + array.trans.sim3[p,1+l, 1] <- 100*transSim3[1] # Transition from blocking to NAO+ + array.trans.sim3[p,1+l, 3] <- 100*transSim3[2] # Transition from blocking to NAO- + array.trans.sim3[p,1+l, 4] <- NA # Transition from blocking to blocking + array.trans.sim3[p,1+l, 2] <- 100*transSim3[4] # Transition from blocking to Atl.Ridge + + array.trans.sim4[p,1+l, 1] <- 100*transSim4[1] # Transition from Atl.ridge to NAO+ + array.trans.sim4[p,1+l, 3] <- 100*transSim4[2] # Transition from Atl.ridge to NAO- + array.trans.sim4[p,1+l, 4] <- 100*transSim4[3] # Transition from Atl.ridge to blocking + array.trans.sim4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge + + array.trans.obs1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+ + array.trans.obs1[p,1+l, 3] <- 100*transObs1[2] # Transition from NAO+ to NAO- + array.trans.obs1[p,1+l, 4] <- 100*transObs1[3] # Transition from NAO+ to blocking + array.trans.obs1[p,1+l, 2] <- 100*transObs1[4] # Transition from NAO+ to Atl.Ridge + + 
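## Illustrative, commented note (the rbind() line below is hypothetical, not executed):
+ ## each group of four array.trans.* assignments fills one row of a 4x4 transition
+ ## matrix; rows follow the source regime (1=NAO+, 2=NAO-, 3=Blocking, 4=Atl.Ridge)
+ ## while columns follow the triangle layout used in the plots (1=left/NAO+,
+ ## 2=bottom/Atl.Ridge, 3=right/NAO-, 4=top/Blocking); self-transitions are left NA.
+ ## For a given startdate p and lead l, the simulated matrix could be rebuilt with:
+ ## rbind(array.trans.sim1[p,1+l,], array.trans.sim2[p,1+l,], array.trans.sim3[p,1+l,], array.trans.sim4[p,1+l,])
+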
array.trans.obs2[p,1+l, 1] <- 100*transObs2[1] # Transition from NAO- to NAO+
+ array.trans.obs2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.obs2[p,1+l, 4] <- 100*transObs2[3] # Transition from NAO- to blocking
+ array.trans.obs2[p,1+l, 2] <- 100*transObs2[4] # Transition from NAO- to Atl.Ridge
+
+ array.trans.obs3[p,1+l, 1] <- 100*transObs3[1] # Transition from blocking to NAO+
+ array.trans.obs3[p,1+l, 3] <- 100*transObs3[2] # Transition from blocking to NAO-
+ array.trans.obs3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.obs3[p,1+l, 2] <- 100*transObs3[4] # Transition from blocking to Atl.Ridge
+
+ array.trans.obs4[p,1+l, 1] <- 100*transObs4[1] # Transition from Atl.ridge to NAO+
+ array.trans.obs4[p,1+l, 3] <- 100*transObs4[2] # Transition from Atl.ridge to NAO-
+ array.trans.obs4[p,1+l, 4] <- 100*transObs4[3] # Transition from Atl.ridge to blocking
+ array.trans.obs4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ array.trans.diff1[p,1+l, 1] <- NA # Transition from NAO+ to NAO+
+ array.trans.diff1[p,1+l, 3] <- 100*(transSim1[2] - transObs1[2]) # Transition from NAO+ to NAO-
+ array.trans.diff1[p,1+l, 4] <- 100*(transSim1[3] - transObs1[3]) # Transition from NAO+ to blocking
+ array.trans.diff1[p,1+l, 2] <- 100*(transSim1[4] - transObs1[4]) # Transition from NAO+ to Atl.Ridge
+
+ array.trans.diff2[p,1+l, 1] <- 100*(transSim2[1] - transObs2[1]) # Transition from NAO- to NAO+
+ array.trans.diff2[p,1+l, 3] <- NA # Transition from NAO- to NAO-
+ array.trans.diff2[p,1+l, 4] <- 100*(transSim2[3] - transObs2[3]) # Transition from NAO- to blocking
+ array.trans.diff2[p,1+l, 2] <- 100*(transSim2[4] - transObs2[4]) # Transition from NAO- to Atl.Ridge
+
+ array.trans.diff3[p,1+l, 1] <- 100*(transSim3[1] - transObs3[1]) # Transition from blocking to NAO+
+ array.trans.diff3[p,1+l, 3] <- 100*(transSim3[2] - transObs3[2]) # Transition from blocking to NAO-
+ array.trans.diff3[p,1+l, 4] <- NA # Transition from blocking to blocking
+ array.trans.diff3[p,1+l, 2] <- 100*(transSim3[4] - transObs3[4]) # Transition from blocking to Atl.Ridge
+
+ array.trans.diff4[p,1+l, 1] <- 100*(transSim4[1] - transObs4[1]) # Transition from Atl.ridge to NAO+
+ array.trans.diff4[p,1+l, 3] <- 100*(transSim4[2] - transObs4[2]) # Transition from Atl.ridge to NAO-
+ array.trans.diff4[p,1+l, 4] <- 100*(transSim4[3] - transObs4[3]) # Transition from Atl.ridge to blocking
+ array.trans.diff4[p,1+l, 2] <- NA # Transition from Atl.ridge to Atl.Ridge
+
+ #array.rpss[p,1+l, 1] <- # NAO+
+ #array.rpss[p,1+l, 3] <- # NAO-
+ #array.rpss[p,1+l, 4] <- # Blocking
+ #array.rpss[p,1+l, 2] <- # Atl.Ridge
+ }
+ }
+
+
+
+ # Spatial Corr summary:
+ png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations.png"), width=550, height=400)
+
+ # create an array similar to array.cor but with colors instead of correlation values:
+ sp.corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac'))
+ my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1)
+
+ array.spat.cor.colors <- array(sp.corr.cols[8],c(12,7,4))
+ array.spat.cor.colors[array.spat.cor < my.seq[8]] <- sp.corr.cols[7]
+ array.spat.cor.colors[array.spat.cor < my.seq[7]] <- sp.corr.cols[6]
+ array.spat.cor.colors[array.spat.cor < my.seq[6]] <- sp.corr.cols[5]
+ array.spat.cor.colors[array.spat.cor < my.seq[5]] <- sp.corr.cols[4]
+ array.spat.cor.colors[array.spat.cor < my.seq[4]] <- sp.corr.cols[3]
+ array.spat.cor.colors[array.spat.cor < my.seq[3]] <- sp.corr.cols[2]
+ array.spat.cor.colors[array.spat.cor < my.seq[2]] <- sp.corr.cols[1]
+
+ par(mar=c(5,4,4,4.5))
+ plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04),
ylim=c(-0.23,6.2), ann=F) + title("Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.spat.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.spat.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = 
"Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.spat.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_spatial_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Spatial correlation between S4 and ERA-Interim regime anomalies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.spat.cor.colors[i2,1+6-j,2])}} + + 
par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = sp.corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + # Temporal Corr summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + corr.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1) + + array.cor.colors <- array(corr.cols[8],c(12,7,4)) + array.cor.colors[array.cor < 0.75] <- corr.cols[7] + array.cor.colors[array.cor < 0.5] <- corr.cols[6] + array.cor.colors[array.cor < 0.25] <- corr.cols[5] + array.cor.colors[array.cor < 0] <- corr.cols[4] + array.cor.colors[array.cor < -0.25] <- corr.cols[3] + array.cor.colors[array.cor < -0.5] <- corr.cols[2] + array.cor.colors[array.cor < -0.75] <- corr.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Correlation between S4 and ERA-Interim frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.cor.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.cor.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.cor.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.cor.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = seq(-1,1,0.25), cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(seq(-1,1,0.25)), my.labels=seq(-1,1,0.25)) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 
1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.cor.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_temporal_correlations_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Correlation between S4 and ERA-Interim frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.cor.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = corr.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + # Frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency.png"), width=550, height=400) + + # create an array similar to array.freq but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean S4 frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", 
xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean S4 frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + 
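# Note on the colour arrays: each summary builds its array.*.colors with eight chained
# assignments of the form array.x.colors[array.x < my.seq[k]] <- x.cols[k-1]. A minimal
# vectorised sketch of the same binning with findInterval() (bin.colors is a hypothetical
# helper, not used elsewhere in this script; brks are the finite interior breaks of my.seq):
bin.colors <- function(x, brks, cols) {
  # findInterval() returns 0 below brks[1], so shifting by 1 indexes cols;
  # cols must have length(brks)+1 elements, and ties at a break fall in the upper bin,
  # matching the strict "<" comparisons of the chained version
  array(cols[findInterval(x, brks) + 1], dim = dim(x))
}
# e.g. bin.colors(array.freq, c(20,22,24,26,28,30,32), freq.cols) should reproduce array.freq.colors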
par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Obs. 
frequency summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs.png"), width=550, height=400) + + # create an array similar to array.freq.obs but with colors instead of frequencies: + freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(NA,20,22,24,26,28,30,32,NA) + + array.freq.colors <- array(freq.cols[8],c(12,7,4)) + array.freq.colors[array.freq.obs < my.seq[[8]]] <- freq.cols[7] + array.freq.colors[array.freq.obs < my.seq[[7]]] <- freq.cols[6] + array.freq.colors[array.freq.obs < my.seq[[6]]] <- freq.cols[5] + array.freq.colors[array.freq.obs < my.seq[[5]]] <- freq.cols[4] + array.freq.colors[array.freq.obs < my.seq[[4]]] <- freq.cols[3] + array.freq.colors[array.freq.obs < my.seq[[3]]] <- freq.cols[2] + array.freq.colors[array.freq.obs < my.seq[[2]]] <- freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Mean observed frequency (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), 
fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_frequency_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Mean observed frequency (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; 
polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + + # Bias freq. summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency.png"), width=550, height=400) + + # create an array similar to array.diff.freq but with colors instead of frequency differences: + diff.freq.cols <- rev(c('#d73027','#f46d43','#fdae61','#fee090','#e0f3f8','#abd9e9','#74add1','#4575b4')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.diff.freq.colors <- array(diff.freq.cols[8],c(12,7,4)) + array.diff.freq.colors[array.diff.freq < my.seq[[8]]] <- diff.freq.cols[7] + array.diff.freq.colors[array.diff.freq < my.seq[[7]]] <- diff.freq.cols[6] + array.diff.freq.colors[array.diff.freq < my.seq[[6]]] <- diff.freq.cols[5] + array.diff.freq.colors[array.diff.freq < my.seq[[5]]] <- diff.freq.cols[4] + array.diff.freq.colors[array.diff.freq < my.seq[[4]]] <- diff.freq.cols[3] + array.diff.freq.colors[array.diff.freq < my.seq[[3]]] <- diff.freq.cols[2] + array.diff.freq.colors[array.diff.freq < my.seq[[2]]] <- diff.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(text="Diff. between S4 and ERA-Interim frequencies (%)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, 
at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + dev.off() + + + + + + + + + # Simulated persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of persistence values: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 
0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("ECMWF-S4 Regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 
1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + # Observed persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs.png"), width=550, height=400) + + # create an array similar to array.pers.obs but with colors instead of persistence values: + pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, NA) + + array.pers.colors <- array(pers.cols[8],c(12,7,4)) + array.pers.colors[array.pers.obs < my.seq[8]] <- pers.cols[7] + array.pers.colors[array.pers.obs < my.seq[7]] <- pers.cols[6] + array.pers.colors[array.pers.obs < my.seq[6]] <- pers.cols[5] + array.pers.colors[array.pers.obs < my.seq[5]] <- pers.cols[4] + array.pers.colors[array.pers.obs < my.seq[4]] <- pers.cols[3] + array.pers.colors[array.pers.obs < my.seq[3]] <- pers.cols[2] + array.pers.colors[array.pers.obs < my.seq[2]] <- pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Observed regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = 
pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed Regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_persistence_obs_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Observed regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", 
xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + + + + # Bias persistence summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence.png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + diff.pers.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(NA, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, NA) + + array.diff.pers.colors <- array(diff.pers.cols[8],c(12,7,4)) + array.diff.pers.colors[array.diff.pers < my.seq[8]] <- diff.pers.cols[7] + array.diff.pers.colors[array.diff.pers < my.seq[7]] <- diff.pers.cols[6] + array.diff.pers.colors[array.diff.pers < my.seq[6]] <- 
diff.pers.cols[5] + array.diff.pers.colors[array.diff.pers < my.seq[5]] <- diff.pers.cols[4] + array.diff.pers.colors[array.diff.pers < my.seq[4]] <- diff.pers.cols[3] + array.diff.pers.colors[array.diff.pers < my.seq[3]] <- diff.pers.cols[2] + array.diff.pers.colors[array.diff.pers < my.seq[2]] <- diff.pers.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.diff.pers.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.diff.pers.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.diff.pers.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.diff.pers.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), 
c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.pers.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_persistence_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("Diff. between S4 and ERA-Interim regime persistence (in days/month)", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 
2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.pers.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.pers.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.9, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "days/month", line = 2, cex=1) + dev.off() + + + + + + + + + + # St.Dev.freq ratio summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_freq.png"), width=550, height=400) + + # create an array similar to array.diff.sd.freq but with colors instead of ratio values: + diff.sd.freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,0.7,0.8,0.9,1.0,1.1,1.2,1.3,2) + + array.diff.sd.freq.colors <- array(diff.sd.freq.cols[8],c(12,7,4)) + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[8]] <- diff.sd.freq.cols[7] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[7]] <- diff.sd.freq.cols[6] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[6]] <- diff.sd.freq.cols[5] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[5]] <- diff.sd.freq.cols[4] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[4]] <- diff.sd.freq.cols[3] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[3]] <- diff.sd.freq.cols[2] + array.diff.sd.freq.colors[array.diff.sd.freq < my.seq[2]] <- diff.sd.freq.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("St.Dev. ratio between sim. and obs. average frequencies", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.diff.sd.freq.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.diff.sd.freq.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.diff.sd.freq.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_ratio_sd_frequency_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("St.Dev. ratio between sim. and obs. 
average frequencies", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.diff.sd.freq.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = diff.sd.freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + + + + + + + # Impact summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],".png"), width=550, height=400) + + # create an array similar to array.pers but with colors instead of frequencies: + imp.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-1,0,0.4,0.5,0.6,0.7,0.8,0.9,1) + + array.imp.colors <- array(imp.cols[8],c(12,7,4)) + array.imp.colors[array.imp < my.seq[8]] <- imp.cols[7] + array.imp.colors[array.imp < my.seq[7]] <- 
imp.cols[6] + array.imp.colors[array.imp < my.seq[6]] <- imp.cols[5] + array.imp.colors[array.imp < my.seq[5]] <- imp.cols[4] + array.imp.colors[array.imp < my.seq[4]] <- imp.cols[3] + array.imp.colors[array.imp < my.seq[3]] <- imp.cols[2] + array.imp.colors[array.imp < my.seq[2]] <- imp.cols[1] + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), col=array.imp.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.imp.colors[p,1+l,2]) + polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.imp.colors[p,1+l,3]) + polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.imp.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,3])}} + + 
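+ # Note: the third index of the colour arrays maps the panels as 1=NAO+, 2=Atlantic Ridge, 3=NAO- and 4=Blocking.
+ # Every regime panel in these summary figures repeats the same template; a possible
+ # refactoring is sketched here (the helper name plot.startdate.panel and its
+ # arguments are hypothetical, not used elsewhere in this script):
+ plot.startdate.panel <- function(col.matrix, regime.title, fig.title, fig.plot){
+   par(mar=c(0,0,4,0), fig=fig.title, new=TRUE)
+   mtext(regime.title, cex=1.5)
+   par(mar=c(0,4,1,0), fig=fig.plot, new=TRUE)
+   plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F)
+   mtext(side = 1, text = "Startdate", line = 4, cex=1.5)
+   mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5)
+   axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5)
+   axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6)
+   # one square per startdate (p) x lead time (l) cell:
+   for(p in 1:12){ for(l in 0:6){ polygon(c(-0.5,-0.5,0.5,0.5)+p, c(-0.5,0.5,0.5,-0.5)+l, col=col.matrix[p,1+l])}}
+ }
+ # e.g. the Atlantic Ridge panel below would collapse to:
+ # plot.startdate.panel(array.imp.colors[,,2], "Atlantic Ridge", c(0.45,0.9,0.42,0.5), c(0.45,0.9,0.12,0.42))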
par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.imp.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_impact_",var.name[var.num],"_target_month.png"), width=800, height=300) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext(paste0("S4 spatial correlation of impact on ",var.name[var.num]), cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.22, 0.8, 0.98), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.22, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.22, 0.44, 0.8, 0.98), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.22, 0.44, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.44, 0.66, 0.8, 0.98), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.44, 0.66, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.68, 0.88, 0.8, 0.98), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.66, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.6, cex=1) + axis(1, at=seq(0,6), las=1, 
cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.imp.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = imp.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + + + + + + + # Simulated transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim1.colors[array.trans.sim1 < my.seq[8]] <- trans.cols[7] + array.trans.sim1.colors[array.trans.sim1 < my.seq[7]] <- trans.cols[6] + array.trans.sim1.colors[array.trans.sim1 < my.seq[6]] <- trans.cols[5] + array.trans.sim1.colors[array.trans.sim1 < my.seq[5]] <- trans.cols[4] + array.trans.sim1.colors[array.trans.sim1 < my.seq[4]] <- trans.cols[3] + array.trans.sim1.colors[array.trans.sim1 < my.seq[3]] <- trans.cols[2] + array.trans.sim1.colors[array.trans.sim1 < my.seq[2]] <- trans.cols[1] + array.trans.sim1.colors[is.na(array.trans.sim1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 
0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
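+ # In the target-month layouts the x axis is lead time drawn in reverse (labels 6:0
+ # at positions 0:6) and row i is the target month, so the colour of cell (i,j) is
+ # looked up at startdate i2 = i-(6-j), wrapped into 1..12 by adding 12 when the
+ # difference is zero or negative: e.g. target month i=3 (Mar) at position j=4 is
+ # lead time 6-j=2, hence startdate i2=1 (Jan).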
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed Transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs1.colors[array.trans.obs1 < my.seq[8]] <- trans.cols[7] + array.trans.obs1.colors[array.trans.obs1 < my.seq[7]] <- trans.cols[6] + array.trans.obs1.colors[array.trans.obs1 < my.seq[6]] <- trans.cols[5] + array.trans.obs1.colors[array.trans.obs1 < my.seq[5]] <- trans.cols[4] + array.trans.obs1.colors[array.trans.obs1 < my.seq[4]] <- trans.cols[3] + array.trans.obs1.colors[array.trans.obs1 < my.seq[3]] <- trans.cols[2] + array.trans.obs1.colors[array.trans.obs1 < my.seq[2]] <- trans.cols[1] + array.trans.obs1.colors[is.na(array.trans.obs1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 
1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO+_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
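+ # The eight-threshold colour assignment repeated for every array in this section
+ # could be collapsed with findInterval(); a sketch only (to.color is a hypothetical
+ # name), equivalent to the chains of "< my.seq[k]" replacements:
+ to.color <- function(x, brks, cols, na.col="white"){
+   # cols[k] is used where brks[k] <= x < brks[k+1], open-ended at both extremes;
+   # NA cells are painted na.col
+   out <- array(cols[1 + findInterval(x, brks[2:(length(brks)-1)])], dim=dim(x))
+   out[is.na(x)] <- na.col
+   out
+ }
+ # e.g.: array.trans.obs1.colors <- to.color(array.trans.obs1, my.seq, trans.cols)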
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Simulated transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim2.colors[array.trans.sim2 < my.seq[8]] <- trans.cols[7] + array.trans.sim2.colors[array.trans.sim2 < my.seq[7]] <- trans.cols[6] + array.trans.sim2.colors[array.trans.sim2 < my.seq[6]] <- trans.cols[5] + array.trans.sim2.colors[array.trans.sim2 < my.seq[5]] <- trans.cols[4] + array.trans.sim2.colors[array.trans.sim2 < my.seq[4]] <- trans.cols[3] + array.trans.sim2.colors[array.trans.sim2 < my.seq[3]] <- trans.cols[2] + array.trans.sim2.colors[array.trans.sim2 < my.seq[2]] <- trans.cols[1] + array.trans.sim2.colors[is.na(array.trans.sim2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), 
col=array.trans.sim2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 
1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + # Observed transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs2.colors[array.trans.obs2 < my.seq[8]] <- trans.cols[7] + array.trans.obs2.colors[array.trans.obs2 < my.seq[7]] <- trans.cols[6] + array.trans.obs2.colors[array.trans.obs2 < my.seq[6]] <- trans.cols[5] + array.trans.obs2.colors[array.trans.obs2 < my.seq[5]] <- trans.cols[4] + array.trans.obs2.colors[array.trans.obs2 < my.seq[4]] <- trans.cols[3] + array.trans.obs2.colors[array.trans.obs2 < 
my.seq[3]] <- trans.cols[2] + array.trans.obs2.colors[array.trans.obs2 < my.seq[2]] <- trans.cols[1] + array.trans.obs2.colors[is.na(array.trans.obs2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from NAO- regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + 
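+ # The simulated/observed transition blocks of this section repeat the same three
+ # figures for each source regime; a compact alternative (a sketch with hypothetical
+ # names, assuming all eight transition arrays exist once the section is complete):
+ # trans.list <- list(sim1=array.trans.sim1, obs1=array.trans.obs1, sim2=array.trans.sim2, obs2=array.trans.obs2,
+ #                    sim3=array.trans.sim3, obs3=array.trans.obs3, sim4=array.trans.sim4, obs4=array.trans.obs4)
+ # trans.colors <- lapply(trans.list, to.color, brks=my.seq, cols=trans.cols)  # see the to.color() sketch above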
par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_NAO-_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.58, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.58, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Simulated transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking.png"), width=550, 
height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim3.colors[array.trans.sim3 < my.seq[8]] <- trans.cols[7] + array.trans.sim3.colors[array.trans.sim3 < my.seq[7]] <- trans.cols[6] + array.trans.sim3.colors[array.trans.sim3 < my.seq[6]] <- trans.cols[5] + array.trans.sim3.colors[array.trans.sim3 < my.seq[5]] <- trans.cols[4] + array.trans.sim3.colors[array.trans.sim3 < my.seq[4]] <- trans.cols[3] + array.trans.sim3.colors[array.trans.sim3 < my.seq[3]] <- trans.cols[2] + array.trans.sim3.colors[array.trans.sim3 < my.seq[2]] <- trans.cols[1] + array.trans.sim3.colors[is.na(array.trans.sim3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 
0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, 
labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + # Observed transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs3.colors[array.trans.obs3 < my.seq[8]] <- trans.cols[7] + array.trans.obs3.colors[array.trans.obs3 < my.seq[7]] <- trans.cols[6] + array.trans.obs3.colors[array.trans.obs3 < my.seq[6]] <- trans.cols[5] + array.trans.obs3.colors[array.trans.obs3 < my.seq[5]] <- trans.cols[4] + array.trans.obs3.colors[array.trans.obs3 < my.seq[4]] <- trans.cols[3] + array.trans.obs3.colors[array.trans.obs3 < my.seq[3]] <- trans.cols[2] + array.trans.obs3.colors[array.trans.obs3 < my.seq[2]] <- trans.cols[1] + array.trans.obs3.colors[is.na(array.trans.obs3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.obs3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_blocking_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.10, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + 
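+ # NA cells (presumably startdate/lead combinations where the source regime never
+ # occurs) are painted white above. A quick sanity check (a sketch, assuming the
+ # four destination percentages cover all transitions): where defined they should
+ # sum to ~100, e.g. for the observed transitions from blocking:
+ # tot <- array.trans.obs3[,,1] + array.trans.obs3[,,2] + array.trans.obs3[,,3] + array.trans.obs3[,,4]
+ # stopifnot(all(abs(tot[!is.na(tot)] - 100) < 1))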
axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + + + # Simulated transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.sim4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.sim4.colors[array.trans.sim4 < my.seq[8]] <- trans.cols[7] + array.trans.sim4.colors[array.trans.sim4 < my.seq[7]] <- trans.cols[6] + array.trans.sim4.colors[array.trans.sim4 < my.seq[6]] <- trans.cols[5] + array.trans.sim4.colors[array.trans.sim4 < my.seq[5]] <- trans.cols[4] + array.trans.sim4.colors[array.trans.sim4 < my.seq[4]] <- trans.cols[3] + array.trans.sim4.colors[array.trans.sim4 < my.seq[3]] <- trans.cols[2] + array.trans.sim4.colors[array.trans.sim4 < my.seq[2]] <- trans.cols[1] + array.trans.sim4.colors[is.na(array.trans.sim4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% ECMWF-S4 transition from Atlantic ridge regime ", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.sim4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.sim4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_startdate.png"), width=750, 
height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.sim4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% ECMWF-S4 transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, 
cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.sim4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + # Observed transition probability from Atlantic ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(0,10,20,30,40,50,60,70,100) + + array.trans.obs4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.obs4.colors[array.trans.obs4 < my.seq[8]] <- trans.cols[7] + array.trans.obs4.colors[array.trans.obs4 < my.seq[7]] <- trans.cols[6] + array.trans.obs4.colors[array.trans.obs4 < my.seq[6]] <- trans.cols[5] + array.trans.obs4.colors[array.trans.obs4 < my.seq[5]] <- trans.cols[4] + array.trans.obs4.colors[array.trans.obs4 < my.seq[4]] <- trans.cols[3] + array.trans.obs4.colors[array.trans.obs4 < my.seq[3]] <- trans.cols[2] + array.trans.obs4.colors[array.trans.obs4 < my.seq[2]] <- trans.cols[1] + array.trans.obs4.colors[is.na(array.trans.obs4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Observed transition from Atlantic ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + 
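+ # each (startdate p, lead time l) cell is drawn as four triangles (left, bottom, right and top),
+ # coloured by the observed transition probability towards regimes 1 to 4 respectively: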
for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.obs4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.obs4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.obs4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + 
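+ # aside on the colour-class cascades above: the chains of "<" assignments bin each percentage into
+ # one of the 8 colour classes; a compact equivalent in base R (a sketch, assuming all values lie in
+ # [0,100]) would be:
+ # idx <- cut(array.trans.obs4, breaks=my.seq, labels=FALSE, right=FALSE, include.lowest=TRUE)
+ # cols <- array(trans.cols[idx], dim(array.trans.obs4)); cols[is.na(array.trans.obs4)] <- "white"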
ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_transition_prob_Atlantic_ridge_obs_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Observed transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.1, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.obs4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + # Bias of the transition probability from NAO+ to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff1.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff1.colors[array.trans.diff1 < my.seq[8]] <- trans.cols[7] + array.trans.diff1.colors[array.trans.diff1 < my.seq[7]] <- trans.cols[6] + array.trans.diff1.colors[array.trans.diff1 < my.seq[6]] <- trans.cols[5] + array.trans.diff1.colors[array.trans.diff1 < 
my.seq[5]] <- trans.cols[4] + array.trans.diff1.colors[array.trans.diff1 < my.seq[4]] <- trans.cols[3] + array.trans.diff1.colors[array.trans.diff1 < my.seq[3]] <- trans.cols[2] + array.trans.diff1.colors[array.trans.diff1 < my.seq[2]] <- trans.cols[1] + array.trans.diff1.colors[is.na(array.trans.diff1)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO+ regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff1.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff1.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), 
col=array.trans.diff1.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff1.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO+_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO+ regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff1.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + + + + + # Bias of 
the Transition probability from NAO- to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff2.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff2.colors[array.trans.diff2 < my.seq[8]] <- trans.cols[7] + array.trans.diff2.colors[array.trans.diff2 < my.seq[7]] <- trans.cols[6] + array.trans.diff2.colors[array.trans.diff2 < my.seq[6]] <- trans.cols[5] + array.trans.diff2.colors[array.trans.diff2 < my.seq[5]] <- trans.cols[4] + array.trans.diff2.colors[array.trans.diff2 < my.seq[4]] <- trans.cols[3] + array.trans.diff2.colors[array.trans.diff2 < my.seq[3]] <- trans.cols[2] + array.trans.diff2.colors[array.trans.diff2 < my.seq[2]] <- trans.cols[1] + array.trans.diff2.colors[is.na(array.trans.diff2)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from NAO- regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff2.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff2.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ 
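+ # unlike the four-triangle cells of the one-figure summaries, each panel of this figure shows a
+ # single target regime, so every (startdate p, lead time l) cell is one full square: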
polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff2.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_NAO-_target_month.png"), width=750, height=350) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from NAO- regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,4])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + 
mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff2.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + + # Bias of the Transition probability from blocking to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff3.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff3.colors[array.trans.diff3 < my.seq[8]] <- trans.cols[7] + array.trans.diff3.colors[array.trans.diff3 < my.seq[7]] <- trans.cols[6] + array.trans.diff3.colors[array.trans.diff3 < my.seq[6]] <- trans.cols[5] + array.trans.diff3.colors[array.trans.diff3 < my.seq[5]] <- trans.cols[4] + array.trans.diff3.colors[array.trans.diff3 < my.seq[4]] <- trans.cols[3] + array.trans.diff3.colors[array.trans.diff3 < my.seq[3]] <- trans.cols[2] + array.trans.diff3.colors[array.trans.diff3 < my.seq[2]] <- trans.cols[1] + array.trans.diff3.colors[is.na(array.trans.diff3)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from blocking regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff3.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff3.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from blocking regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, 
at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff3.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_blocking_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Blocking regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead 
time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Atlantic Ridge", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff3.colors[i2,1+6-j,2])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + + + + + + + + + + # Bias of the Transition probability from Atlantic Ridge to X summary: + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge.png"), width=550, height=400) + + # create an array similar to array.cor but with colors instead of corr.values: + trans.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + my.seq <- c(-20,-10,-5,-2,0,2,5,10,20) + + array.trans.diff4.colors <- array(trans.cols[8],c(12,7,4)) + array.trans.diff4.colors[array.trans.diff4 < my.seq[8]] <- trans.cols[7] + array.trans.diff4.colors[array.trans.diff4 < my.seq[7]] <- trans.cols[6] + array.trans.diff4.colors[array.trans.diff4 < my.seq[6]] <- trans.cols[5] + array.trans.diff4.colors[array.trans.diff4 < my.seq[5]] <- trans.cols[4] + array.trans.diff4.colors[array.trans.diff4 < my.seq[4]] <- trans.cols[3] + array.trans.diff4.colors[array.trans.diff4 < my.seq[3]] <- trans.cols[2] + array.trans.diff4.colors[array.trans.diff4 < my.seq[2]] <- trans.cols[1] + array.trans.diff4.colors[is.na(array.trans.diff4)] <- "white" + + par(mar=c(5,4,4,4.5)) + plot(1,1, type="n", xaxt="n", yaxt="n", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + title("% Bias transition from Atlantic Ridge regime", cex=1.5) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + + for(p in 1:12){ + for(l in 0:6){ + polygon(c(0.5+p-1, 0.5+p-1, 1+p-1), c(-0.5+l, 0.5+l, 0+l), col=array.trans.diff4.colors[p,1+l,1]) # first triangle + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(-0.5+l, 0+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2]) + polygon(c(1+p-1, 1.5+p-1, 1.5+p-1), c(l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3]) + polygon(c(0.5+p-1, 1+p-1, 1.5+p-1), c(0.5+l, 0+l, 0.5+l), col=array.trans.diff4.colors[p,1+l,4]) + } + } + + par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), 
my.labels=my.seq) + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_startdate.png"), width=750, height=600) + + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 1), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.5) + + # NAO+ : + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.85, 0.98), new=TRUE) + mtext("NAO+", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,1])}} + + par(mar=c(0,0,4,0), fig=c(0, 0.45, 0.42, 0.5), new=TRUE) + mtext("Blocking", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0, 0.45, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,4])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.85, 0.98), new=TRUE) + mtext("NAO-", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.6, 0.9), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,3])}} + + par(mar=c(0,0,4,0), fig=c(0.45, 0.9, 0.42, 0.5), new=TRUE) + mtext("Atlantic Ridge", cex=1.5) + par(mar=c(0,4,1,0), fig=c(0.45, 0.9, 0.12, 0.42), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Lead time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + mtext(side = 2, text = "Lead time", line = 2.5, cex=1.5) + axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + axis(2, at=seq(0,6), las=2, cex.axis=1.5, labels=0:6) + for(p in 1:12){ for(l in 0:6){ polygon(c(0.5+p-1, 0.5+p-1, 1.5+p-1, 1.5+p-1), c(-0.5+l, 0.5+l, 0.5+l, -0.5+l), col=array.trans.diff4.colors[p,1+l,2])}} + + par(fig=c(0.91, 1, 0.1, 0.9), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + + dev.off() + + png(filename=paste0(work.dir,"/",forecast.name,"_summary_bias_transition_prob_Atlantic_ridge_target_month.png"), width=750, height=350) + plot.new() + par(mar=c(0,0,2,0), fig=c(0, 1, 0.9, 0.95), new=TRUE) + mtext("% Bias transition from Atlantic Ridge regime", cex=1.2) + + par(mar=c(0,0,4,0), fig=c(0.07, 0.28, 0.8, 0.95), new=TRUE) + mtext("NAO+", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0, 0.28, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", 
yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,1])}} + + par(mar=c(0,0,4,0), fig=c(0.30, 0.58, 0.8, 0.95), new=TRUE) + mtext("NAO-", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.30, 0.58, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,3])}} + + par(mar=c(0,0,4,0), fig=c(0.60, 0.88, 0.8, 0.95), new=TRUE) + mtext("Blocking", cex=1.2) + par(mar=c(1,4,1,0), fig=c(0.60, 0.88, 0.1, 0.8), new=TRUE) + plot(1,1, type="n", xaxt="n", yaxt="n", ylim=c(1,12.04), xlim=c(-0.23,6.2), ann=F) + mtext(side = 1, text = "Lead time", line = 2, cex=1) + mtext(side = 2, text = "Target month", line = 2.8, cex=1) + axis(1, at=seq(0,6), las=1, cex.axis=1, labels=6:0, mgp=c(3,0.7,0)) + axis(2, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1) + for(i in 1:12){ for(j in 0:6){ i2=ifelse(i-(6-j) > 0, i-(6-j), i-(6-j)+12) ; polygon(c(-0.5+j, 0.5+j, 0.5+j, -0.5+j), c(0.5+i-1, 0.5+i-1, 1.5+i-1, 1.5+i-1), col=array.trans.diff4.colors[i2,1+6-j,4])}} + + par(fig=c(0.91, 1, 0.1, 0.8), new=TRUE) + ColorBar2(brks = my.seq, cols = trans.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(my.seq), my.labels=my.seq) + par(fig=c(0.91, 1, 0.88, 0.93), new=TRUE) + mtext(side = 1, text = "%", line = 2, cex=1) + + dev.off() + + ## # FairRPSS summary: + ## png(filename=paste0(work.dir,"/",forecast.name,"_summary_rpss.png"), width=550, height=400) + + ## # create an array similar to array.diff.freq but with colors instead of frequencies: + ## freq.cols <- rev(c('#b2182b','#d6604d','#f4a582','#fddbc7','#d1e5f0','#92c5de','#4393c3','#2166ac')) + + ## array.freq.colors <- array(freq.cols[8],c(12,7,4)) + ## array.freq.colors[array.diff.freq < 10] <- freq.cols[7] + ## array.freq.colors[array.diff.freq < 5] <- freq.cols[6] + ## array.freq.colors[array.diff.freq < 2] <- freq.cols[5] + ## array.freq.colors[array.diff.freq < 0] <- freq.cols[4] + ## array.freq.colors[array.diff.freq < -2] <- freq.cols[3] + ## array.freq.colors[array.diff.freq < -5] <- freq.cols[2] + ## array.freq.colors[array.diff.freq < -10] <- freq.cols[1] + + ## par(mar=c(5,4,4,4.5)) + ## plot(1,1, type="n", xaxt="n", yaxt="n",ylab="Forecast time", xlim=c(1,12.04), ylim=c(-0.23,6.2), ann=F) + ## title("Frequency difference (%) between S4 and ERA-Interim", cex=1.5) + ## mtext(side = 1, text = "Startdate", line = 4, cex=1.5) + ## mtext(side = 2, text = "Forecast time", line = 2.5, cex=1.5) + ## axis(1, at=seq(1,12), las=2, labels=my.month.short, cex.axis=1.5) + ## axis(2, at=seq(0,6), las=2, cex.axis=1.5) + + ## for(p in 1:12){ + ## for(l in 0:6){ + ## polygon(c(0.5+p-1,0.5+p-1,1+p-1),c(-0.5+l,0.5+l,0+l), 
col=array.freq.colors[p,1+l,1]) # first triangle + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(-0.5+l,0+l,-0.5+l), col=array.freq.colors[p,1+l,2]) + ## polygon(c(1+p-1,1.5+p-1,1.5+p-1),c(l,0.5+l,-0.5+l), col=array.freq.colors[p,1+l,3]) + ## polygon(c(0.5+p-1,1+p-1,1.5+p-1),c(0.5+l,0+l,0.5+l), col=array.freq.colors[p,1+l,4]) + ## } + ## } + + ## par(fig=c(0.875, 1, 0.14, 0.89), new=TRUE) + ## ColorBar2(brks = c(-20,-10,-5,-2,0,2,5,10,20), cols = freq.cols, vert=T, draw.ticks=F, label.dist=0.3, my.ticks=-0.5 + 1:length(c(-20,-10,-5,-2,0,2,5,10,20)), my.labels=c(-20,-10,-5,-2,0,2,5,10,20)) + ## dev.off() + +} # close if on composition == "summary" + + + + +# impact map of the strongest WR: +if(composition == "impact.highest" && fields.name == rean.name){ + + var.num <- 2 # choose a variable (1:sfcWind, 2:tas) + index <- 1 # choose an index: 1: most influential, 2: most influential positively, 3: most influential negatively + + if ( index == 1 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent.png"), width=550, height=650) + if ( index == 2 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_positively.png"), width=550, height=650) + if ( index == 3 ) png(filename=paste0(rean.dir,"/",rean.name,"_",var.name.full[var.num],"_most_influent_negatively.png"), width=550, height=650) + + plot.new() + #par(mfrow=c(4,3)) + #col.regimes <- c('#7b3294','#c2a5cf','#a6dba0','#008837') + col.regimes <- c('#e66101','#fdb863','#b2abd2','#5e3c99') + + for(p in 13:16){ # or 1:12 for monthly maps + load(file=paste0(rean.dir,"/",rean.name,"_",my.period[p],"_",var.name[var.num],".RData")) + load(paste0(rean.dir,"/",rean.name,"_",my.period[p],"_ClusterNames.RData")) # they should not depend on var.name!!! + + # positions of the longitude values covering Europe only (without the Atlantic Ocean and America): + #if(fields.name == "ERA-Interim") EU <- c(1:which(lon >= lon.max)[1], (length(lon)-30):length(lon)) # restrict the area to continental Europe only + lon.max = 45 + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + cluster1 <- which(orden == cluster1.name.period[p]) # in future it will be == cluster1.name + cluster2 <- which(orden == cluster2.name.period[p]) + cluster3 <- which(orden == cluster3.name.period[p]) + cluster4 <- which(orden == cluster4.name.period[p]) + + assign(paste0("imp",cluster1), varPeriodAnom1mean) + assign(paste0("imp",cluster2), varPeriodAnom2mean) + assign(paste0("imp",cluster3), varPeriodAnom3mean) + assign(paste0("imp",cluster4), varPeriodAnom4mean) + + # most influential WT: + if (index == 1) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if(abs(imp1[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 1.5 + if(abs(imp2[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 2.5 + if(abs(imp3[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 3.5 + if(abs(imp4[i,j]) >= max(abs(imp1[i,j]), abs(imp2[i,j]), abs(imp3[i,j]), abs(imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + # most influential WT with positive impact: + if (index == 2) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) >= max((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + + } + + # most influential WT with negative impact: + if (index == 3) { + imp.all <- imp1 + for(i in 1:dim(imp1)[1]){ + for(j in 1:dim(imp1)[2]){ + if((imp1[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 1.5 + if((imp2[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 2.5 + if((imp3[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 3.5 + if((imp4[i,j]) <= min((imp1[i,j]), (imp2[i,j]), (imp3[i,j]), (imp4[i,j]))) imp.all[i,j] <- 4.5 + } + } + } + + if(p<=3) yMod <- 0.7 + if(p>=4 && p<=6) yMod <- 0.5 + if(p>=7 && p<=9) yMod <- 0.3 + if(p>=10) yMod <- 0.1 + if(p==13 || p==14) yMod <- 0.52 + if(p==15 || p==16) yMod <- 0.1 + + if(p==1 || p==4 || p==7 || p==10) xMod <- 0.05 + if(p==2 || p==5 || p==8 || p==11) xMod <- 0.35 + if(p==3 || p==6 || p==9 || p==12) xMod <- 0.65 + if(p==13 || p==15) xMod <- 0.05 + if(p==14 || p==16) xMod <- 0.50 + + # impact map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod, yMod + 0.17), new=TRUE) + if(p > 12) par(fig=c(xMod, xMod + 0.45, yMod, yMod + 0.38), new=TRUE) + PlotEquiMap(imp.all[,EU], lon[EU], lat, filled.continents = FALSE, brks=1:5, cols=col.regimes, axelab=F, drawleg=F) + + # title map: + if(p <= 12) par(fig=c(xMod, xMod + 0.3, yMod + 0.162, yMod + 0.172), new=TRUE) + if(p > 12) par(fig=c(xMod + 0.07, xMod + 0.07 + 0.3, yMod +0.21 + 0.166, yMod +0.21 + 0.176), new=TRUE) + mtext(my.period[p], font=2, cex=1.5) + + } # close 'p' on 'period' + + par(fig=c(0.1,0.9, 0.03, 0.11), new=TRUE) + ColorBar2(1:5, cols=col.regimes, vert=FALSE, my.ticks=1:4, draw.ticks=FALSE, my.labels=orden) + + par(fig=c(0.1,0.9, 0.92, 0.97), new=TRUE) + if( index == 1 ) mtext(paste0("Most influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 2 ) mtext(paste0("Most positively influential WR on ",var.name[var.num]), font=2, cex=1.5) + if( index == 3 ) mtext(paste0("Most negatively influential WR on ",var.name[var.num]), font=2, cex=1.5) + + dev.off() + +} # close if on composition == "impact.highest" + + + + + + + + + + + + + +if(monthly_anomalies) { + # Compute the climatology and anomalies (with a LOESS filter) for the sfcWind data and draw the monthly anomalies, to compare the mean daily wind speed anomalies reconstructed by the weather regime classification with those observed by the reanalysis: + + load(paste0(rean.dir,"/",rean.name,"_",my.period[month.chosen[1]],"_psl.RData")) # only to load year.start and year.end + + sfcWind366 <- Load(var = "sfcWind", exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + sfcWind <- sfcWind366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) sfcWind[y2,60:365,,] <- sfcWind366[1,1,y2,61:366,,] # copy the March-December period (days 61:366) into days 60:365, removing the 29th of February + } + + rm(sfcWind366) + gc() + + # LOESS anomalies: + sfcWindClimDaily <- apply(sfcWind, c(2,3,4), mean, na.rm=T) + + sfcWindClimLoess <- sfcWindClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=sfcWindClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + sfcWindClimLoess[,i,j] <- predict(my.loess) + } + } + + 
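+ # the LOESS fit above smooths the raw day-of-year climatology before the anomalies are computed;
+ # with span=0.35 each local regression uses about 35% of the 365 days (~128 days). A minimal
+ # self-contained sketch of the same smoothing on a hypothetical noisy seasonal cycle (not data
+ # from this script):
+ # day <- 1:365
+ # x <- sin(2*pi*day/365) + rnorm(365, sd=0.3)
+ # x.smooth <- predict(loess(x ~ day, data.frame(x, day), span=0.35))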
rm(sfcWindClimDaily) + gc() + + sfcWindClim2 <- InsertDim(sfcWindClimLoess, 1, n.years) + sfcWindAnom <- sfcWind - sfcWindClim2 + sfcWindAnomRel <- sfcWindAnom / sfcWindClim2 + + rm(sfcWindClimLoess) + gc() + + # same as above, but computing the climatology and anomalies (with a LOESS filter) for the psl data, to draw the monthly SLP anomalies: + slp366 <- Load(var = psl, exp = NULL, obs = list(list(path=rean.data)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)$obs + + slp <- slp366[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) slp[y2,60:365,,] <- slp366[1,1,y2,61:366,,] # copy the March-December period (days 61:366) into days 60:365, removing the 29th of February + } + + rm(slp366) + gc() + + slpClimDaily <- apply(slp, c(2,3,4), mean, na.rm=T) + + slpClimLoess <- slpClimDaily + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=slpClimDaily[,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + slpClimLoess[,i,j] <- predict(my.loess) + } + } + + rm(slpClimDaily) + gc() + + slpClim2 <- InsertDim(slpClimLoess, 1, n.years) + slpAnom <- slp - slpClim2 + + rm(slpClimLoess) + gc() + + if(psl == "psl") slpAnom <- slpAnom/100 # convert MSLP from Pa to hPa + + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only + + ## color scale for impact maps: + ## my.brks.var <- c(-20,seq(-3,3,0.5),20) + my.brks.var <- c(-20,seq(-0.6,0.6,0.1),20) + my.cols.var <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var)-1) # blue--white--red colors + + # same but for slp: + my.brks.var2 <- c(-100,seq(-21,-1,2),0,seq(1,21,2),100) + my.cols.var2 <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.var2)-1) # blue--red colors + my.cols.var2[floor(length(my.cols.var2)/2)] <- my.cols.var2[floor(length(my.cols.var2)/2)+1] <- "white" + + orden <- c("NAO+","NAO-","Blocking","Atl.Ridge") + + # save all monthly anomaly maps: + for(year.test in year.chosen){ + for(month.test in month.chosen){ + ##year.test <- 2016; month.test <- 10 # for the debug + + pos.year.test <- year.test - year.start + 1 + + ## wind anomaly for the chosen year and month: + sfcWindAnomPeriod <- sfcWindAnom[pos.year.test, pos.period(1,month.test),,] + sfcWindAnomPeriodRel <- sfcWindAnomRel[pos.year.test, pos.period(1,month.test),,] + + sfcWindAnomPeriodMean <- apply(sfcWindAnomPeriodRel, c(2,3), mean, na.rm=TRUE) + + # psl anomaly for the chosen year and month: + slpAnomPeriod <- slpAnom[pos.year.test,pos.period(1,month.test),,] + + slpAnomPeriodMean <- apply(slpAnomPeriod, c(2,3), mean, na.rm=TRUE) + + fileoutput <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly.png") + + png(filename=fileoutput,width=2800,height=1000) + + ## reset par to its default values, because drawing with PlotEquiMap() alters some par values: + if(year.test == year.chosen[1] && month.test == month.chosen[1]) { op <- par(no.readonly = TRUE) } + + par(fig=c(0, 0.36, 0.08, 0.98), new=TRUE) + #PlotEquiMap(rescale(sfcWindAnomPeriodMean[,EU],my.brks.var[1],tail(my.brks.var,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], title_scale=1.2, intxlon=10, intylat=10, drawleg=F) #, cex.lab=1.5) + + PlotEquiMap(sfcWindAnomPeriodMean[,EU], lon[EU], lat, filled.continents=FALSE, brks=my.brks.var, cols=my.cols.var, 
+
+      #my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(fig=c(0.03, 0.36, 0.00, 0.1), new=TRUE)
+      #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2)
+      #ColorBar(my.brks.var, cols=my.cols.var[2:(length(my.cols.var)-1)], vert=FALSE, label_scale=1.8, var_limits=c(-10,10), bar_limits=c(my.brks.var[1],my.brks.var[l(my.brks.var)]), col_inf=my.cols.var[1], col_sup=my.cols.var[length(my.cols.var)])
+
+      ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)],0), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=3, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2)
+
+      par(fig=c(0.34, 0.37, 0, 0.028), new=TRUE)
+      mtext("%", cex=1.8)
+
+      par(fig=c(0.37, 1, 0.1, 0.98), new=TRUE)
+
+      PlotEquiMap2(slpAnomPeriodMean, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(slpAnomPeriodMean), contours.lty="F1FF1F", cex.lab=1)
+
+      # the standard PlotEquiMap() call below would almost work too; it only lacks the black contour line at anomaly=0 and smaller contour labels:
+      # PlotEquiMap(rescale(slpAnomPeriodMean[,EU], my.brks.var2[1], tail(my.brks.var2,1)), lon[EU], lat, filled.continents=FALSE, brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=(slpAnomPeriodMean[,EU]), contour_lty="F1FF1F", contour_label_scale=10, title_scale=1.2) #, cex.lab=1.5)
+
+      ##my.subset2 <- match(values.to.plot2, my.brks.var)
+      #legend2.cex <- 1.8
+
+      par(op)  # reset the par parameters
+      par(fig=c(0.37, 1, 0, 0.1), new=TRUE)
+      #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2)
+      ColorBar(brks=my.brks.var2[2:(l(my.brks.var2)-1)], cols=my.cols.var2[2:(l(my.cols.var2)-1)], vert=FALSE, label_scale=3, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) #my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) subset=my.subset2)
+
+      par(fig=c(0.96, 0.99, 0, 0.027), new=TRUE)
+      mtext("hPa", cex=1.8)
+
+      #par(fig=c(0.74, 0.76, 0, 0.027), new=TRUE)
+      #mtext("0", cex=1.8)
+
+      dev.off()
+
+      # same image, formatted for the catalogue:
+      fileoutput2 <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput," ", fileoutput2))
+
+      # mean wind speed and slp anomaly only during the days of the chosen year and month:
+      load(paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_psl.RData"))  # load cluster.sequence
+
+      # arrange the regime sequence in an array, to separate months from years:
+      cluster2D <- array(cluster.sequence, c(n.days.in.a.period(month.test,1),n.years))
+      #cluster2D <- array(my.cluster$cluster, c(n.days.in.a.period(p,1),n.years))  # only for NCEP
+
+      cluster.test <- cluster2D[,pos.year.test]
+
+      fre1.days <- length(which(cluster.test == 1))
+      fre2.days <- length(which(cluster.test == 2))
+      fre3.days <- length(which(cluster.test == 3))
+      fre4.days <- length(which(cluster.test == 4))
+
+      # wind anomaly for the chosen year, month and cluster:
+      sfcWind1 <- sfcWindAnomPeriodRel[which(cluster.test == 1),,,drop=FALSE]
+      sfcWind2 <- sfcWindAnomPeriodRel[which(cluster.test == 2),,,drop=FALSE]
+      sfcWind3 <- sfcWindAnomPeriodRel[which(cluster.test == 3),,,drop=FALSE]
+      sfcWind4 <- sfcWindAnomPeriodRel[which(cluster.test == 4),,,drop=FALSE]
+
+      # in case a regime has NO days in that month, we must set it to NA:
+      if(length(which(cluster.test == 1)) == 0 ) sfcWind1 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 2)) == 0 ) sfcWind2 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 3)) == 0 ) sfcWind3 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 4)) == 0 ) sfcWind4 <- array(NA, c(1,dim(sfcWindAnomPeriod)[2:3]))
+
+      sfcWindMean1 <- apply(sfcWind1, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean2 <- apply(sfcWind2, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean3 <- apply(sfcWind3, c(2,3), mean, na.rm=FALSE)
+      sfcWindMean4 <- apply(sfcWind4, c(2,3), mean, na.rm=FALSE)
+
+      # load the regime names:
+      load(paste0(rean.dir,"/",rean.name,"_", my.period[month.test],"_","ClusterNames",".RData"))
+
+      ## add a strip with the daily sequence of WRs:
+
+      mod.name1 <- substr(cluster1.name, nchar(cluster1.name), nchar(cluster1.name))
+      mod.name2 <- substr(cluster2.name, nchar(cluster2.name), nchar(cluster2.name))
+      mod.name3 <- substr(cluster3.name, nchar(cluster3.name), nchar(cluster3.name))
+      mod.name4 <- substr(cluster4.name, nchar(cluster4.name), nchar(cluster4.name))
+
+      cluster1.name.short <- substr(cluster1.name,1,1)
+      cluster2.name.short <- substr(cluster2.name,1,1)
+      cluster3.name.short <- substr(cluster3.name,1,1)
+      cluster4.name.short <- substr(cluster4.name,1,1)
+
+      ## add + or - at the end of the cluster name, if it is a NAO+ or NAO- regime:
+      if(mod.name1 == "+" || mod.name1 == "-") cluster1.name.short <- paste0(substr(cluster1.name,1,1), mod.name1)
+      if(mod.name2 == "+" || mod.name2 == "-") cluster2.name.short <- paste0(substr(cluster2.name,1,1), mod.name2)
+      if(mod.name3 == "+" || mod.name3 == "-") cluster3.name.short <- paste0(substr(cluster3.name,1,1), mod.name3)
+      if(mod.name4 == "+" || mod.name4 == "-") cluster4.name.short <- paste0(substr(cluster4.name,1,1), mod.name4)
+
+      c1 <- which(cluster.test == 1)
+      c2 <- which(cluster.test == 2)
+      c3 <- which(cluster.test == 3)
+      c4 <- which(cluster.test == 4)
+
+      cluster.test.letters <- cluster.test
+      cluster.test.letters[c1] <- cluster1.name.short
+      cluster.test.letters[c2] <- cluster2.name.short
+      cluster.test.letters[c3] <- cluster3.name.short
+      cluster.test.letters[c4] <- cluster4.name.short
+
+      my.strip <- cluster.test.letters
+
+      if(no.regimes) {
+        cluster.test.letters2 <- cluster.test.letters
+        cluster.test.letters2[which(cluster.test.letters == "N+")] <- "C1"
+        cluster.test.letters2[which(cluster.test.letters == "N-")] <- "C2"
+        cluster.test.letters2[which(cluster.test.letters == "B")] <- "C3"
+        cluster.test.letters2[which(cluster.test.letters == "A")] <- "C4"
+        my.strip <- cluster.test.letters2
+      }
+
+      cluster.col <- cluster.test.letters
+      cluster.col[which(cluster.test.letters == "N+")] <- "Firebrick1"
+      cluster.col[which(cluster.test.letters == "N-")] <- "Dodgerblue1"
+      cluster.col[which(cluster.test.letters == "B")] <- "White"
+      cluster.col[which(cluster.test.letters == "A")] <- "Darkgoldenrod1"
+
+      cluster1 <- which(orden == cluster1.name)
+      cluster2 <- which(orden == cluster2.name)
+      cluster3 <- which(orden == cluster3.name)
+      cluster4 <- which(orden == cluster4.name)
+
+      assign(paste0("fre.days",cluster1), fre1.days)
+      assign(paste0("fre.days",cluster2), fre2.days)
+      assign(paste0("fre.days",cluster3), fre3.days)
+      assign(paste0("fre.days",cluster4), fre4.days)
+
+      assign(paste0("fre",cluster1), wr1y)
+      assign(paste0("fre",cluster2), wr2y)
+      assign(paste0("fre",cluster3), wr3y)
+      assign(paste0("fre",cluster4), wr4y)
+
+      assign(paste0("imp.test",cluster1), sfcWindMean1)
+      assign(paste0("imp.test",cluster2), sfcWindMean2)
+      assign(paste0("imp.test",cluster3), sfcWindMean3)
+      assign(paste0("imp.test",cluster4), sfcWindMean4)
+
+      # psl anomaly for the chosen year, month and cluster:
+      slp1 <- slpAnomPeriod[which(cluster.test == 1),,,drop=FALSE]
+      slp2 <- slpAnomPeriod[which(cluster.test == 2),,,drop=FALSE]
+      slp3 <- slpAnomPeriod[which(cluster.test == 3),,,drop=FALSE]
+      slp4 <- slpAnomPeriod[which(cluster.test == 4),,,drop=FALSE]
+
+      # in case a regime has NO days in that month, we must set it to NA:
+      if(length(which(cluster.test == 1)) == 0 ) slp1 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 2)) == 0 ) slp2 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 3)) == 0 ) slp3 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+      if(length(which(cluster.test == 4)) == 0 ) slp4 <- array(NA, c(1,dim(slpAnomPeriod)[2:3]))
+
+      slpMean1 <- apply(slp1, c(2,3), mean, na.rm=FALSE)
+      slpMean2 <- apply(slp2, c(2,3), mean, na.rm=FALSE)
+      slpMean3 <- apply(slp3, c(2,3), mean, na.rm=FALSE)
+      slpMean4 <- apply(slp4, c(2,3), mean, na.rm=FALSE)
+
+      assign(paste0("psl.test",cluster1), slpMean1)
+      assign(paste0("psl.test",cluster2), slpMean2)
+      assign(paste0("psl.test",cluster3), slpMean3)
+      assign(paste0("psl.test",cluster4), slpMean4)
+
+      # save the strip with the daily regime series for the chosen month and year:
+      fileoutput.seq <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_regimes_sequence.png")
+      png(filename=fileoutput.seq,width=1500,height=1850)
+
+      plot.new()
+
+      sep <- 0.03
+      for(day in 1:n.days.in.a.period(month.test, 2001)){
+        sep.cum <- (day-1)*sep
+        polygon(c(sep.cum + 0.01, sep.cum + 0.01 + sep, sep.cum + 0.01 + sep, sep.cum + 0.01), c(1.01, 1.01, 1.01+sep, 1.01+sep), border="black", col=cluster.col[day])
+        text(sep.cum + 0.01 + sep/2, 0.997 + sep + 0.005, labels=day, cex=1.5)
+        text(sep.cum + 0.01 + sep/2, 1.013 + 0.005, labels=my.strip[day], cex=2)
+      }
+
+      dev.off()
+
+      # save the average impact and sea level pressure only for the chosen month and year:
+      fileoutput.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes.png")
+
+      png(filename=fileoutput.test,width=1500,height=2000)
+
+      plot.new()
+
+      par(fig=c(0, 0.33, 0.77, 0.97), new=TRUE)
+      PlotEquiMap2(imp.test1[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.54, 0.74), new=TRUE)
+      PlotEquiMap2(imp.test2[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.31, 0.51), new=TRUE)
+      PlotEquiMap2(imp.test3[,EU], lon[EU], lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F)
+      par(fig=c(0, 0.33, 0.08, 0.28), new=TRUE)
+      PlotEquiMap2(imp.test4[,EU], lon[EU],
lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var, cols=my.cols.var, cex.lab=1.2, intxlon=10, intylat=10, drawleg=F) + + if(no.regimes) { regime.title <- paste0("Cluster",1:4)} else { regime.title <- orden} + + par(fig=c(0,0.33,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " wind speed anomaly "), font=2, cex=2) + par(fig=c(0,0.33,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " wind speed anomaly "), font=2, cex=2) + + par(fig=c(0.03, 0.33, 0.015, 0.06), new=TRUE) + #ColorBar(my.brks.var, cols=my.cols.var, vert=FALSE, label_scale=1.8, triangle_ends=c(FALSE,FALSE)) #, subset=my.subset2) + ColorBar(brks=round(100*my.brks.var[2:(l(my.brks.var)-1)]), cols=my.cols.var[2:(l(my.cols.var)-1)], vert=FALSE, label_scale=2, bar_limits=c(100*my.brks.var[2],100*my.brks.var[l(my.brks.var)-1]), col_inf=my.cols.var[1], col_sup=my.cols.var[l(my.cols.var)], subsample=2) #triangle_ends=c(T,T)) #, subset=my.subset2) + + par(fig=c(0.33, 0.34, 0.01, 0.044), new=TRUE) + mtext("%", cex=1.6) + + # right figures: + par(fig=c(0.34, 0.92, 0.77, 0.97), new=TRUE) + PlotEquiMap2(psl.test1, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test1), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.54, 0.74), new=TRUE) + PlotEquiMap2(psl.test2, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test2), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.31, 0.51), new=TRUE) + PlotEquiMap2(psl.test3, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test3), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + par(fig=c(0.34, 0.92, 0.08, 0.28), new=TRUE) + PlotEquiMap2(psl.test4, lon, lat, filled.continents=FALSE, continents.col="black", brks=my.brks.var2, brks2=my.brks.var2, cols=my.cols.var2, intxlon=10, intylat=10, drawleg=F, contours=t(psl.test4), contours.lty="F1FF1F", cex.lab=1, xlabel.dist=.5) + + par(fig=c(0.34,0.92,0.955,0.975), new=TRUE) + mtext(paste0(regime.title[1], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.725,0.745), new=TRUE) + mtext(paste0(regime.title[2], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.495,0.515), new=TRUE) + mtext(paste0(regime.title[3], " SLP anomaly "), font=2, cex=2) + par(fig=c(0.34,0.92,0.265,0.285), new=TRUE) + mtext(paste0(regime.title[4], " SLP anomaly "), font=2, cex=2) + + par(fig=c(0.34, 0.93, 0.015, 0.06), new=TRUE) + #ColorBar2(my.brks.var2, cols=my.cols.var2, vert=FALSE, my.ticks=-0.5 + 1:length(my.brks.var2), my.labels=my.brks.var2) #, subsampleg=1, label_scale=1.8) #, subset=my.subset2) + ColorBar(my.brks.var2[2:(length(my.brks.var2)-1)], cols=my.cols.var2[2:(length(my.cols.var2)-1)], vert=FALSE, label_scale=2, bar_limits=c(my.brks.var2[2],my.brks.var2[l(my.brks.var2)-1]), col_inf=my.cols.var2[1], col_sup=my.cols.var2[l(my.cols.var2)], subsample=1) + + par(fig=c(0.924, 0.930, 0.01, 0.044), new=TRUE) + mtext("hPa", cex=1.6) + + #par(fig=c(0.627, 0.647, 0, 0.028), new=TRUE) + #mtext("0", cex=1.8) 
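+      # All the panels above are placed with the base-graphics idiom par(fig=c(x1,x2,y1,y2),
+      # new=TRUE), which reserves a sub-region of the device (in normalised [0,1] coordinates)
+      # before each plotting call. A minimal two-panel sketch of the same idiom:
+      #   par(fig=c(0, 0.5, 0, 1)); plot(1:10)             # left half of the device
+      #   par(fig=c(0.5, 1, 0, 1), new=TRUE); plot(10:1)   # right half, same page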
+
+      n.days <- floor(n.days.in.a.period(month.test,1))
+
+      par(fig=c(0.93, 0.99, 0.77, 0.87), new=TRUE)
+      mtext(paste0(fre.days1," days\n(",round(100*fre.days1/n.days,1),"%)"), cex=2.8)
+      par(fig=c(0.93, 0.99, 0.54, 0.64), new=TRUE)
+      mtext(paste0(fre.days2," days\n(",round(100*fre.days2/n.days,1),"%)"), cex=2.8)
+      par(fig=c(0.93, 0.99, 0.31, 0.41), new=TRUE)
+      mtext(paste0(fre.days3," days\n(",round(100*fre.days3/n.days,1),"%)"), cex=2.8)
+      par(fig=c(0.93, 0.99, 0.08, 0.18), new=TRUE)
+      mtext(paste0(fre.days4," days\n(",round(100*fre.days4/n.days,1),"%)"), cex=2.8)
+
+      dev.off()
+
+      ## add the strip with the regime sequence over the average impact composition:
+      fileoutput.temp <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp.png")
+      fileoutput.both <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_temp2.png")
+
+      system(paste0("convert ",fileoutput.seq," -crop +0-1730 +repage ",fileoutput.temp))  # cut the lower part of the strip
+      system(paste0("montage ",fileoutput.temp," ",fileoutput.test," -tile 1x2 -geometry +0+0 ",fileoutput.both))
+
+      ## same image, formatted for the catalogue:
+      fileoutput2.test <- paste0(rean.dir,"/",rean.name,"_",my.period[month.test],"_monthly_anomaly_regimes_fig2cat.png")
+
+      system(paste0("~/scripts/fig2catalog.sh -s 0.8 -m 20 -r 40 -t '",rean.name," / 10m wind speed and sea level pressure / Monthly anomalies \n",month.name[month.test]," / ",year.test,"' -c 'Region: North Atlantic (27°N-81°N, 85.5°W-45°E)\nReference dataset: ",rean.name," reanalysis' ", fileoutput.both," ", fileoutput2.test))
+
+      system(paste0("rm ", fileoutput.temp, " ", fileoutput.both," ", fileoutput.seq," ", fileoutput.test, " ", fileoutput))
+
+    } # close for on month.test
+  } # close for on year.test
+
+} # close if on monthly_anomalies
+
diff --git a/weather_regimes_titles_catalogue.sh b/weather_regimes_titles_catalogue.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3acc101c3f0315d7ffa3ccd75273769dc36dc6e8
--- /dev/null
+++ b/weather_regimes_titles_catalogue.sh
@@ -0,0 +1,2074 @@
+###################################################################################
+#                                  Single images                                  #
+###################################################################################
+
+# regime anomalies:
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO+_anomalies.png ./formatted/ERA-Interim_Winter_NAO+_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO-_anomalies.png ./formatted/ERA-Interim_Winter_NAO-_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking anomalies\nDJF / 1979-2013' ERA-Interim_Winter_blocking_anomalies.png ./formatted/ERA-Interim_Winter_blocking_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nDJF / 1979-2013' ERA-Interim_Winter_atlantic_anomalies.png ./formatted/ERA-Interim_Winter_atlantic_anomalies.png
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO+_anomalies.png ./formatted/ERA-Interim_Spring_NAO+_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO-_anomalies.png ./formatted/ERA-Interim_Spring_NAO-_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking
anomalies\nMAM / 1979-2013' ERA-Interim_Spring_blocking_anomalies.png ./formatted/ERA-Interim_Spring_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nMAM / 1979-2013' ERA-Interim_Spring_atlantic_anomalies.png ./formatted/ERA-Interim_Spring_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO+_anomalies.png ./formatted/ERA-Interim_Summer_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO-_anomalies.png ./formatted/ERA-Interim_Summer_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking anomalies\nJJA / 1979-2013' ERA-Interim_Summer_blocking_anomalies.png ./formatted/ERA-Interim_Summer_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nJJA / 1979-2013' ERA-Interim_Summer_atlantic_anomalies.png ./formatted/ERA-Interim_Summer_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO+_anomalies.png ./formatted/ERA-Interim_Autumn_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO-_anomalies.png ./formatted/ERA-Interim_Autumn_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking anomalies\nSON / 1979-2013' ERA-Interim_Autumn_blocking_anomalies.png ./formatted/ERA-Interim_Autumn_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nSON / 1979-2013' ERA-Interim_Autumn_atlantic_anomalies.png ./formatted/ERA-Interim_Autumn_atlantic_anomalies.png + + +# regime frequency: + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO+_freq.png ./formatted/ERA-Interim_Winter_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO-_freq.png ./formatted/ERA-Interim_Winter_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nDJF / 1979-2013' ERA-Interim_Winter_blocking_freq.png ./formatted/ERA-Interim_Winter_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nDJF / 1979-2013' ERA-Interim_Winter_atlantic_freq.png ./formatted/ERA-Interim_Winter_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO+_freq.png ./formatted/ERA-Interim_Spring_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO-_freq.png ./formatted/ERA-Interim_Spring_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nMAM / 1979-2013' ERA-Interim_Spring_blocking_freq.png ./formatted/ERA-Interim_Spring_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nMAM / 1979-2013' ERA-Interim_Spring_atlantic_freq.png ./formatted/ERA-Interim_Spring_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO+_freq.png 
./formatted/ERA-Interim_Summer_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO-_freq.png ./formatted/ERA-Interim_Summer_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nJJA / 1979-2013' ERA-Interim_Summer_blocking_freq.png ./formatted/ERA-Interim_Summer_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nJJA / 1979-2013' ERA-Interim_Summer_atlantic_freq.png ./formatted/ERA-Interim_Summer_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO+_freq.png ./formatted/ERA-Interim_Autumn_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO-_freq.png ./formatted/ERA-Interim_Autumn_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nSON / 1979-2013' ERA-Interim_Autumn_blocking_freq.png ./formatted/ERA-Interim_Autumn_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nSON / 1979-2013' ERA-Interim_Autumn_atlantic_freq.png ./formatted/ERA-Interim_Autumn_atlantic_freq.png + + +# impact on sfcWind: + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Winter_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Winter_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Winter_sfcWind_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Winter_sfcWind_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Spring_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Spring_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Spring_sfcWind_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Spring_sfcWind_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Summer_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Summer_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Summer_sfcWind_blocking_impact.png 
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Summer_sfcWind_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_atlantic_impact.png + +# impact on tas: + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_NAO+_impact.png ./formatted/ERA-Interim_Winter_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO- impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_NAO-_impact.png ./formatted/ERA-Interim_Winter_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_blocking_impact.png ./formatted/ERA-Interim_Winter_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_atlantic_impact.png ./formatted/ERA-Interim_Winter_tas_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_NAO+_impact.png ./formatted/ERA-Interim_Spring_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO- impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_NAO-_impact.png ./formatted/ERA-Interim_Spring_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_blocking_impact.png ./formatted/ERA-Interim_Spring_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_atlantic_impact.png ./formatted/ERA-Interim_Spring_tas_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_NAO+_impact.png ./formatted/ERA-Interim_Summer_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO- impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_NAO-_impact.png ./formatted/ERA-Interim_Summer_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_blocking_impact.png ./formatted/ERA-Interim_Summer_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_atlantic_impact.png ./formatted/ERA-Interim_Summer_tas_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_NAO+_impact.png ./formatted/ERA-Interim_Autumn_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 
'ERA-Interim / 2m temperature / NAO- impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_NAO-_impact.png ./formatted/ERA-Interim_Autumn_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_blocking_impact.png ./formatted/ERA-Interim_Autumn_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_atlantic_impact.png ./formatted/ERA-Interim_Autumn_tas_atlantic_impact.png + + +############################################################# +# JRA-55 # +############################################################# + +# regime anomalies: + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO+ anomalies\nDJF / 1979-2013' JRA-55_Winter_NAO+_anomalies.png ./formatted/JRA-55_Winter_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO- anomalies\nDJF / 1979-2013' JRA-55_Winter_NAO-_anomalies.png ./formatted/JRA-55_Winter_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Blocking anomalies\nDJF / 1979-2013' JRA-55_Winter_blocking_anomalies.png ./formatted/JRA-55_Winter_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Atlantic ridge anomalies\nDJF / 1979-2013' JRA-55_Winter_atlantic_anomalies.png ./formatted/JRA-55_Winter_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO+ anomalies\nMAM / 1979-2013' JRA-55_Spring_NAO+_anomalies.png ./formatted/JRA-55_Spring_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO- anomalies\nMAM / 1979-2013' JRA-55_Spring_NAO-_anomalies.png ./formatted/JRA-55_Spring_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Blocking anomalies\nMAM / 1979-2013' JRA-55_Spring_blocking_anomalies.png ./formatted/JRA-55_Spring_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Atlantic ridge anomalies\nMAM / 1979-2013' JRA-55_Spring_atlantic_anomalies.png ./formatted/JRA-55_Spring_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO+ anomalies\nJJA / 1979-2013' JRA-55_Summer_NAO+_anomalies.png ./formatted/JRA-55_Summer_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO- anomalies\nJJA / 1979-2013' JRA-55_Summer_NAO-_anomalies.png ./formatted/JRA-55_Summer_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Blocking anomalies\nJJA / 1979-2013' JRA-55_Summer_blocking_anomalies.png ./formatted/JRA-55_Summer_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Atlantic ridge anomalies\nJJA / 1979-2013' JRA-55_Summer_atlantic_anomalies.png ./formatted/JRA-55_Summer_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO+ anomalies\nSON / 1979-2013' JRA-55_Autumn_NAO+_anomalies.png ./formatted/JRA-55_Autumn_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / NAO- anomalies\nSON / 1979-2013' JRA-55_Autumn_NAO-_anomalies.png ./formatted/JRA-55_Autumn_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Blocking anomalies\nSON / 1979-2013' JRA-55_Autumn_blocking_anomalies.png ./formatted/JRA-55_Autumn_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / Atlantic ridge anomalies\nSON / 1979-2013' 
JRA-55_Autumn_atlantic_anomalies.png ./formatted/JRA-55_Autumn_atlantic_anomalies.png + + +# regime frequency: + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO+ anomalies\nDJF / 1979-2013' JRA-55_Winter_NAO+_freq.png ./formatted/JRA-55_Winter_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO- anomalies\nDJF / 1979-2013' JRA-55_Winter_NAO-_freq.png ./formatted/JRA-55_Winter_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Blocking anomalies\nDJF / 1979-2013' JRA-55_Winter_blocking_freq.png ./formatted/JRA-55_Winter_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Atlantic ridge anomalies\nDJF / 1979-2013' JRA-55_Winter_atlantic_freq.png ./formatted/JRA-55_Winter_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO+ anomalies\nMAM / 1979-2013' JRA-55_Spring_NAO+_freq.png ./formatted/JRA-55_Spring_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO- anomalies\nMAM / 1979-2013' JRA-55_Spring_NAO-_freq.png ./formatted/JRA-55_Spring_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Blocking anomalies\nMAM / 1979-2013' JRA-55_Spring_blocking_freq.png ./formatted/JRA-55_Spring_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Atlantic ridge anomalies\nMAM / 1979-2013' JRA-55_Spring_atlantic_freq.png ./formatted/JRA-55_Spring_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO+ anomalies\nJJA / 1979-2013' JRA-55_Summer_NAO+_freq.png ./formatted/JRA-55_Summer_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO- anomalies\nJJA / 1979-2013' JRA-55_Summer_NAO-_freq.png ./formatted/JRA-55_Summer_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Blocking anomalies\nJJA / 1979-2013' JRA-55_Summer_blocking_freq.png ./formatted/JRA-55_Summer_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Atlantic ridge anomalies\nJJA / 1979-2013' JRA-55_Summer_atlantic_freq.png ./formatted/JRA-55_Summer_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO+ anomalies\nSON / 1979-2013' JRA-55_Autumn_NAO+_freq.png ./formatted/JRA-55_Autumn_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / NAO- anomalies\nSON / 1979-2013' JRA-55_Autumn_NAO-_freq.png ./formatted/JRA-55_Autumn_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Blocking anomalies\nSON / 1979-2013' JRA-55_Autumn_blocking_freq.png ./formatted/JRA-55_Autumn_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / weather regimes / Atlantic ridge anomalies\nSON / 1979-2013' JRA-55_Autumn_atlantic_freq.png ./formatted/JRA-55_Autumn_atlantic_freq.png + + +# impact on sfcWind: + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO+ impact\nDJF / 1979-2013' JRA-55_Winter_sfcWind_NAO+_impact.png ./formatted/JRA-55_Winter_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO- impact\nDJF / 1979-2013' JRA-55_Winter_sfcWind_NAO-_impact.png ./formatted/JRA-55_Winter_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Blocking impact\nDJF / 1979-2013' JRA-55_Winter_sfcWind_blocking_impact.png ./formatted/JRA-55_Winter_sfcWind_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Atlantic ridge impact\nDJF / 1979-2013' JRA-55_Winter_sfcWind_atlantic_impact.png 
./formatted/JRA-55_Winter_sfcWind_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO+ impact\nMAM / 1979-2013' JRA-55_Spring_sfcWind_NAO+_impact.png ./formatted/JRA-55_Spring_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO- impact\nMAM / 1979-2013' JRA-55_Spring_sfcWind_NAO-_impact.png ./formatted/JRA-55_Spring_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Blocking impact\nMAM / 1979-2013' JRA-55_Spring_sfcWind_blocking_impact.png ./formatted/JRA-55_Spring_sfcWind_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Atlantic ridge impact\nMAM / 1979-2013' JRA-55_Spring_sfcWind_atlantic_impact.png ./formatted/JRA-55_Spring_sfcWind_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO+ impact\nJJA / 1979-2013' JRA-55_Summer_sfcWind_NAO+_impact.png ./formatted/JRA-55_Summer_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO- impact\nJJA / 1979-2013' JRA-55_Summer_sfcWind_NAO-_impact.png ./formatted/JRA-55_Summer_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Blocking impact\nJJA / 1979-2013' JRA-55_Summer_sfcWind_blocking_impact.png ./formatted/JRA-55_Summer_sfcWind_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Atlantic ridge impact\nJJA / 1979-2013' JRA-55_Summer_sfcWind_atlantic_impact.png ./formatted/JRA-55_Summer_sfcWind_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO+ impact\nSON / 1979-2013' JRA-55_Autumn_sfcWind_NAO+_impact.png ./formatted/JRA-55_Autumn_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / NAO- impact\nSON / 1979-2013' JRA-55_Autumn_sfcWind_NAO-_impact.png ./formatted/JRA-55_Autumn_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Blocking impact\nSON / 1979-2013' JRA-55_Autumn_sfcWind_blocking_impact.png ./formatted/JRA-55_Autumn_sfcWind_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 10m wind speed / Atlantic ridge impact\nSON / 1979-2013' JRA-55_Autumn_sfcWind_atlantic_impact.png ./formatted/JRA-55_Autumn_sfcWind_atlantic_impact.png + +# impact on tas: + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO+ impact\nDJF / 1979-2013' JRA-55_Winter_tas_NAO+_impact.png ./formatted/JRA-55_Winter_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO- impact\nDJF / 1979-2013' JRA-55_Winter_tas_NAO-_impact.png ./formatted/JRA-55_Winter_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / Blocking impact\nDJF / 1979-2013' JRA-55_Winter_tas_blocking_impact.png ./formatted/JRA-55_Winter_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / Atlantic ridge impact\nDJF / 1979-2013' JRA-55_Winter_tas_atlantic_impact.png ./formatted/JRA-55_Winter_tas_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO+ impact\nMAM / 1979-2013' JRA-55_Spring_tas_NAO+_impact.png ./formatted/JRA-55_Spring_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO- impact\nMAM / 1979-2013' JRA-55_Spring_tas_NAO-_impact.png ./formatted/JRA-55_Spring_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / Blocking impact\nMAM / 1979-2013' JRA-55_Spring_tas_blocking_impact.png ./formatted/JRA-55_Spring_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 
'JRA-55 / 2m temperature / Atlantic ridge impact\nMAM / 1979-2013' JRA-55_Spring_tas_atlantic_impact.png ./formatted/JRA-55_Spring_tas_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO+ impact\nJJA / 1979-2013' JRA-55_Summer_tas_NAO+_impact.png ./formatted/JRA-55_Summer_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO- impact\nJJA / 1979-2013' JRA-55_Summer_tas_NAO-_impact.png ./formatted/JRA-55_Summer_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / Blocking impact\nJJA / 1979-2013' JRA-55_Summer_tas_blocking_impact.png ./formatted/JRA-55_Summer_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / Atlantic ridge impact\nJJA / 1979-2013' JRA-55_Summer_tas_atlantic_impact.png ./formatted/JRA-55_Summer_tas_atlantic_impact.png + +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO+ impact\nSON / 1979-2013' JRA-55_Autumn_tas_NAO+_impact.png ./formatted/JRA-55_Autumn_tas_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / NAO- impact\nSON / 1979-2013' JRA-55_Autumn_tas_NAO-_impact.png ./formatted/JRA-55_Autumn_tas_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / Blocking impact\nSON / 1979-2013' JRA-55_Autumn_tas_blocking_impact.png ./formatted/JRA-55_Autumn_tas_blocking_impact.png +sh ~/scripts/fig2catalog.sh -t 'JRA-55 / 2m temperature / Atlantic ridge impact\nSON / 1979-2013' JRA-55_Autumn_tas_atlantic_impact.png ./formatted/JRA-55_tas_atlantic_impact.png + + +################################################################################### +# Seasonal composition # +################################################################################### +# regime anomalies: +width_figure=$(identify -ping -format %w ERA-Interim_Winter_NAO+_anomalies.png) + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Winter_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png ERA-Interim_Winter_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png ERA-Interim_Winter_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png ERA-Interim_Winter_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Winter_anomalies.png + +convert -background white -font Arial -pointsize 50 
-size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Spring_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png ERA-Interim_Spring_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png ERA-Interim_Spring_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png ERA-Interim_Spring_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Spring_anomalies.png + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Summer_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png ERA-Interim_Summer_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png ERA-Interim_Summer_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png ERA-Interim_Summer_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Summer_anomalies.png + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Autumn_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage 
trash3.png ERA-Interim_Autumn_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png ERA-Interim_Autumn_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png ERA-Interim_Autumn_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Autumn_anomalies.png + +# regime frequency: + +width_figure=$(identify -ping -format %w ERA-Interim_Winter_NAO+_freq.png) + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Winter_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png ERA-Interim_Winter_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png ERA-Interim_Winter_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png ERA-Interim_Winter_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Winter_freq.png + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Spring_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png ERA-Interim_Spring_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size 
${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png ERA-Interim_Spring_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png ERA-Interim_Spring_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Spring_freq.png + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Summer_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png ERA-Interim_Summer_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png ERA-Interim_Summer_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png ERA-Interim_Summer_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Summer_freq.png + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png ERA-Interim_Autumn_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png ERA-Interim_Autumn_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font 
Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Autumn_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Autumn_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Autumn_freq.png
+
+
+# regime impact on sfcWind:
+
+width_figure=$(identify -ping -format %w ERA-Interim_Winter_sfcWind_NAO+_impact.png)
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Winter_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Winter_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Winter_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Winter_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Winter_sfcWind_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Spring_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Spring_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Spring_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Spring_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Spring_sfcWind_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Summer_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Summer_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Summer_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Summer_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Summer_sfcWind_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Autumn_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Autumn_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Autumn_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Autumn_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Autumn_sfcWind_impact.png
+
+
+# regime impact on temperature:
+
+width_figure=$(identify -ping -format %w ERA-Interim_Winter_tas_NAO+_impact.png)
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Winter_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Winter_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Winter_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Winter_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Winter_tas_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Spring_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Spring_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Spring_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Spring_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Spring_tas_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Summer_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Summer_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Summer_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Summer_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Summer_tas_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png ERA-Interim_Autumn_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png ERA-Interim_Autumn_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png ERA-Interim_Autumn_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png ERA-Interim_Autumn_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/ERA-Interim_Autumn_tas_impact.png
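+
+# NOTE: the 17-command pipeline above is repeated verbatim for every season and
+# variable. Assuming the file-naming scheme stays as above, it could be factored
+# into two helpers; "label_panel" and "make_2x2" are hypothetical names, and this
+# simplified sketch drops the thin white spacer strips built from the 1-point
+# pango images:
+
+label_panel() {  # $1 = panel title, $2 = input map, $3 = labelled output
+  local w
+  w=$(identify -ping -format %w "$2")
+  convert -background white -font Arial -pointsize 50 -size ${w}x -gravity center pango:"$1" trash_title.png
+  montage trash_title.png "$2" -tile 1x2 -geometry +0+0 "$3"
+}
+
+make_2x2() {  # $1 = file prefix (e.g. ERA-Interim_Winter_tas), $2 = suffix (e.g. impact), $3 = output
+  label_panel "NAO+"           "${1}_NAO+_${2}.png"     trash_tl.png
+  label_panel "NAO-"           "${1}_NAO-_${2}.png"     trash_tr.png
+  label_panel "Blocking"       "${1}_blocking_${2}.png" trash_bl.png
+  label_panel "Atlantic Ridge" "${1}_atlantic_${2}.png" trash_br.png
+  montage trash_tl.png trash_tr.png trash_bl.png trash_br.png -tile 2x2 -geometry +0+0 "$3"
+}
+
+# Example, equivalent (up to the spacers) to the winter tas block above:
+# make_2x2 ERA-Interim_Winter_tas impact ./2x2/ERA-Interim_Winter_tas_impact.png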
+
+
+
+##################
+# Same for JRA:  #
+##################
+
+# regime anomalies:
+width_figure=$(identify -ping -format %w JRA-55_Winter_NAO+_anomalies.png)
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Winter_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Winter_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Winter_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Winter_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Winter_anomalies.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Spring_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Spring_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Spring_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Spring_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Spring_anomalies.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Summer_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Summer_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Summer_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Summer_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Summer_anomalies.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Autumn_NAO+_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Autumn_NAO-_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Autumn_blocking_anomalies.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Autumn_atlantic_anomalies.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Autumn_anomalies.png
+
+# regime frequency:
+
+width_figure=$(identify -ping -format %w JRA-55_Winter_NAO+_freq.png)
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Winter_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Winter_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Winter_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Winter_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Winter_freq.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Spring_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Spring_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Spring_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Spring_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Spring_freq.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Summer_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Summer_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Summer_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Summer_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Summer_freq.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Autumn_NAO+_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Autumn_NAO-_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Autumn_blocking_freq.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Autumn_atlantic_freq.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Autumn_freq.png
+
+
+# regime impact on sfcWind:
+
+width_figure=$(identify -ping -format %w JRA-55_Winter_sfcWind_NAO+_impact.png)
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Winter_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Winter_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Winter_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Winter_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Winter_sfcWind_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Spring_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Spring_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Spring_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Spring_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Spring_sfcWind_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Summer_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Summer_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Summer_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Summer_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Summer_sfcWind_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Autumn_sfcWind_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Autumn_sfcWind_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Autumn_sfcWind_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Autumn_sfcWind_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Autumn_sfcWind_impact.png
+
+
+# regime impact on temperature:
+
+width_figure=$(identify -ping -format %w JRA-55_Winter_tas_NAO+_impact.png)
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Winter_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Winter_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Winter_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Winter_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Winter_tas_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Spring_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Spring_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Spring_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Spring_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Spring_tas_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Summer_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Summer_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Summer_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Summer_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Summer_tas_impact.png
+
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+montage trash1.png JRA-55_Autumn_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+montage trash3.png JRA-55_Autumn_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+montage trash1.png JRA-55_Autumn_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png
+convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+montage trash3.png JRA-55_Autumn_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png
+montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Autumn_tas_impact.png
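+
+# NOTE: with the make_2x2 helper sketched earlier, the sixteen JRA-55 composites
+# above could be produced by one loop instead (left commented out, since the
+# explicit commands above already generate these files):
+#
+# for season in Winter Spring Summer Autumn; do
+#   make_2x2 "JRA-55_${season}"         anomalies "./2x2/JRA-55_${season}_anomalies.png"
+#   make_2x2 "JRA-55_${season}"         freq      "./2x2/JRA-55_${season}_freq.png"
+#   make_2x2 "JRA-55_${season}_sfcWind" impact    "./2x2/JRA-55_${season}_sfcWind_impact.png"
+#   make_2x2 "JRA-55_${season}_tas"     impact    "./2x2/JRA-55_${season}_tas_impact.png"
+# done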
+
+
+##########
+# titles #
+##########
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Winter_anomalies.png ./2x2_formatted/ERA-Interim_Winter_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Spring_anomalies.png ./2x2_formatted/ERA-Interim_Spring_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Summer_anomalies.png ./2x2_formatted/ERA-Interim_Summer_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Autumn_anomalies.png ./2x2_formatted/ERA-Interim_Autumn_anomalies.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Winter_freq.png ./2x2_formatted/ERA-Interim_Winter_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nMAM / 1979-2013' -c 'Black Line: significant trend at 95% confidence level with Mann-Kendall test \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Spring_freq.png ./2x2_formatted/ERA-Interim_Spring_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nJJA / 1979-2013' -c 'Black Line: significant trend at 95% confidence level with Mann-Kendall test \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Summer_freq.png ./2x2_formatted/ERA-Interim_Summer_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Autumn_freq.png ./2x2_formatted/ERA-Interim_Autumn_freq.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Winter_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Winter_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Spring_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Spring_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Summer_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Summer_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Autumn_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Autumn_sfcWind_impact.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Winter_tas_impact.png ./2x2_formatted/ERA-Interim_Winter_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Spring_tas_impact.png ./2x2_formatted/ERA-Interim_Spring_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Summer_tas_impact.png ./2x2_formatted/ERA-Interim_Summer_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Autumn_tas_impact.png ./2x2_formatted/ERA-Interim_Autumn_tas_impact.png
+
+# JRA-55:
+
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Winter_anomalies.png ./2x2_formatted/JRA-55_Winter_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Spring_anomalies.png ./2x2_formatted/JRA-55_Spring_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Summer_anomalies.png ./2x2_formatted/JRA-55_Summer_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Autumn_anomalies.png ./2x2_formatted/JRA-55_Autumn_anomalies.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Winter_freq.png ./2x2_formatted/JRA-55_Winter_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Spring_freq.png ./2x2_formatted/JRA-55_Spring_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Summer_freq.png ./2x2_formatted/JRA-55_Summer_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Autumn_freq.png ./2x2_formatted/JRA-55_Autumn_freq.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Winter_sfcWind_impact.png ./2x2_formatted/JRA-55_Winter_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Spring_sfcWind_impact.png ./2x2_formatted/JRA-55_Spring_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nJJA / 1979-2013' -c 'Black Line: significant trend at 95% confidence level with Mann-Kendall test \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Summer_sfcWind_impact.png ./2x2_formatted/JRA-55_Summer_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Autumn_sfcWind_impact.png ./2x2_formatted/JRA-55_Autumn_sfcWind_impact.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Winter_tas_impact.png ./2x2_formatted/JRA-55_Winter_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Spring_tas_impact.png ./2x2_formatted/JRA-55_Spring_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Summer_tas_impact.png ./2x2_formatted/JRA-55_Summer_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Autumn_tas_impact.png ./2x2_formatted/JRA-55_Autumn_tas_impact.png
+
+
+rm trash*.*
+
+
+
+
+
+
+
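+# NOTE: the -c caption strings above repeat the same region and significance
+# text in every call; assuming fig2catalog.sh treats single- and double-quoted
+# arguments alike, shell variables would keep the captions consistent (REGION
+# and HATCH are hypothetical names):
+#
+# REGION='Region: North Atlantic (27°N–81°N, 85.5°W–45°E)'
+# HATCH='Hatched areas: significant at 95% confidence level from a two-tailed t-test'
+# sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nDJF / 1979-2013' \
+#    -c "${REGION} \n${HATCH}" ./2x2/ERA-Interim_Winter_tas_impact.png ./2x2_formatted/ERA-Interim_Winter_tas_impact.png
+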
+###############################################################################
+# Validation weather regimes with S4                                          #
+###############################################################################
+
+# regime anomalies:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_psl.png ./formatted/ECMWF-S4_forecast_month_1_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_psl.png ./formatted/ECMWF-S4_forecast_month_2_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_psl.png ./formatted/ECMWF-S4_forecast_month_3_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_psl.png ./formatted/ECMWF-S4_forecast_month_4_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_psl.png ./formatted/ECMWF-S4_forecast_month_5_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_psl.png ./formatted/ECMWF-S4_forecast_month_6_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_psl.png ./formatted/ECMWF-S4_forecast_month_7_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_psl.png ./formatted/ECMWF-S4_forecast_month_8_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_psl.png ./formatted/ECMWF-S4_forecast_month_9_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_psl.png ./formatted/ECMWF-S4_forecast_month_10_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_psl.png ./formatted/ECMWF-S4_forecast_month_11_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_psl.png ./formatted/ECMWF-S4_forecast_month_12_psl.png
+
+# regime frequency:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_frequency.png ./formatted/ECMWF-S4_forecast_month_1_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_frequency.png ./formatted/ECMWF-S4_forecast_month_2_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_frequency.png ./formatted/ECMWF-S4_forecast_month_3_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_frequency.png ./formatted/ECMWF-S4_forecast_month_4_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_frequency.png ./formatted/ECMWF-S4_forecast_month_5_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_frequency.png ./formatted/ECMWF-S4_forecast_month_6_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_frequency.png ./formatted/ECMWF-S4_forecast_month_7_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_frequency.png ./formatted/ECMWF-S4_forecast_month_8_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_frequency.png ./formatted/ECMWF-S4_forecast_month_9_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_frequency.png ./formatted/ECMWF-S4_forecast_month_10_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_frequency.png ./formatted/ECMWF-S4_forecast_month_11_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_frequency.png ./formatted/ECMWF-S4_forecast_month_12_frequency.png
+
+# regime impact on sfcWind:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_1_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_2_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_3_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_4_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_5_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_6_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_7_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_8_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_9_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_10_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_11_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_12_impact_sfcWind.png
+
+# regime impact on tas:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_impact_tas.png ./formatted/ECMWF-S4_forecast_month_1_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_impact_tas.png ./formatted/ECMWF-S4_forecast_month_2_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_impact_tas.png ./formatted/ECMWF-S4_forecast_month_3_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_impact_tas.png ./formatted/ECMWF-S4_forecast_month_4_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_impact_tas.png ./formatted/ECMWF-S4_forecast_month_5_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_impact_tas.png ./formatted/ECMWF-S4_forecast_month_6_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_impact_tas.png ./formatted/ECMWF-S4_forecast_month_7_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_impact_tas.png ./formatted/ECMWF-S4_forecast_month_8_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_impact_tas.png ./formatted/ECMWF-S4_forecast_month_9_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_impact_tas.png ./formatted/ECMWF-S4_forecast_month_10_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_impact_tas.png ./formatted/ECMWF-S4_forecast_month_11_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_impact_tas.png ./formatted/ECMWF-S4_forecast_month_12_impact_tas.png
+
+
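+# NOTE: each of the four blocks above runs the same command twelve times,
+# changing only the target month, its start-date window (the six preceding
+# months plus the target month itself) and the file index. A loop over two
+# parallel arrays would shorten them; the array names are hypothetical and the
+# loop is left commented out:
+#
+# months=(January February March April May June July August September October November December)
+# starts=(July August September October November December January February March April May June)
+# for m in {0..11}; do
+#   sh ~/scripts/fig2catalog.sh -t "ECMWF-S4 / 2m temperature / regime impact\n${months[$m]} / 1981-2015" \
+#      -c "Start date: 1st of every month from ${starts[$m]} to ${months[$m]} \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)" \
+#      ECMWF-S4_forecast_month_$((m+1))_impact_tas.png ./formatted/ECMWF-S4_forecast_month_$((m+1))_impact_tas.png
+# done
+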
./formatted/ECMWF-S4_summary_spatial_correlations.png +sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / regime anomalies spatial correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_spatial_correlations_target_month.png ./formatted/ECMWF-S4_summary_spatial_correlations_target_month.png + +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_temporal_correlations.png ./formatted/ECMWF-S4_summary_temporal_correlations.png +sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / weather regimes frequency correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_temporal_correlations_target_month.png ./formatted/ECMWF-S4_summary_temporal_correlations_target_month.png + +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_sfcWind.png ./formatted/ECMWF-S4_summary_impact_sfcWind.png +sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / 10m wind speed / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_sfcWind_target_month.png ./formatted/ECMWF-S4_summary_impact_sfcWind_target_month.png + +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_tas.png ./formatted/ECMWF-S4_summary_impact_tas.png +sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / 2m temperature / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_tas_target_month.png ./formatted/ECMWF-S4_summary_impact_tas_target_month.png + +# frequency: +#sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_frequency_obs_target_month.png ./formatted/ECMWF-S4_summary_frequency_obs_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_frequency_target_month.png ./formatted/ECMWF-S4_summary_frequency_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'Weather regimes average frequency bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_frequency_target_month.png ./formatted/ECMWF-S4_summary_bias_frequency_target_month.png + +sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ERA-Interim / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_frequency_obs_target_month.png ./trash2.png +sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ECMWF-S4 / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_frequency_target_month.png ./trash1.png +sh ~/scripts/fig2catalog.sh -r 30 -t 
'Weather regimes average frequency bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_frequency_target_month.png ./trash3.png +montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_frequency_target_month.png + +# persistence: +#sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_persistence_obs_target_month.png ./formatted/ECMWF-S4_summary_persistence_obs_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_persistence_target_month.png ./formatted/ECMWF-S4_summary_persistence_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'Weather regimes persistence bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_persistence_target_month.png ./formatted/ECMWF-S4_summary_bias_persistence_target_month.png + +sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ERA-Interim / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_persistence_obs_target_month.png ./trash2.png +sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ECMWF-S4 / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_persistence_target_month.png ./trash1.png +sh ~/scripts/fig2catalog.sh -r 30 -t 'Weather regimes persistence bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_persistence_target_month.png ./trash3.png +montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_persistence_target_month.png + +# Transition prob: +# Atlantic ridge: +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_Atlantic_ridge_obs_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_Atlantic_ridge_obs_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_Atlantic_ridge_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_Atlantic_ridge_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge_target_month.png + +sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_Atlantic_ridge_obs_target_month.png ./trash2.png +sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_Atlantic_ridge_target_month.png ./trash1.png +sh ~/scripts/fig2catalog.sh -r 47 -t 'Atlantic ridge transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North 
Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge_target_month.png ./trash3.png +montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_Atlantic_ridge_target_month.png + +# Blocking: +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_blocking_obs_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_blocking_obs_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_blocking_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_blocking_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_blocking_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_blocking_target_month.png + +sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_blocking_obs_target_month.png ./trash2.png +sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_blocking_target_month.png ./trash1.png +sh ~/scripts/fig2catalog.sh -r 47 -t 'Blocking transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_blocking_target_month.png ./trash3.png +montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_blocking_target_month.png + + +# NAO+: +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_NAO+_obs_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO+_obs_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_NAO+_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO+_target_month.png +#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Reference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO+_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO+_target_month.png + +sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO+_obs_target_month.png ./trash2.png +sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO+_target_month.png ./trash1.png +sh ~/scripts/fig2catalog.sh -r 47 -t 'NAO+ transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_NAO+_target_month.png ./trash3.png +montage 
trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_NAO+_target_month.png
+
+
+# NAO-:
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_obs_NAO-_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO-_obs_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_NAO-_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO-_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO-_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO-_target_month.png
+
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO-_obs_target_month.png ./trash2.png
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO-_target_month.png ./trash1.png
+sh ~/scripts/fig2catalog.sh -r 47 -t 'NAO- transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_NAO-_target_month.png ./trash3.png
+montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_NAO-_target_month.png
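+
+# The six summary blocks above (frequency, persistence and the four regime
+# transition probabilities) repeat the same recipe: render the ERA-Interim and
+# ECMWF-S4 panels with a legend (-l), render the bias panel with the region
+# footer, then stack the three with ImageMagick's montage. A commented-out
+# sketch of a helper that could factor this out (the name and argument order
+# are hypothetical; the fig2catalog.sh flags are the ones used above):
+#
+#summary_montage() {  # summary_montage <rot> <obs title> <obs.png> <exp title> <exp.png> <bias title> <bias.png> <out.png>
+#  local rot=$1 obs_t=$2 obs=$3 exp_t=$4 exp=$5 bias_t=$6 bias=$7 out=$8
+#  sh ~/scripts/fig2catalog.sh -l -r "$rot" -t "$obs_t" -x 15 "$obs" ./trash2.png
+#  sh ~/scripts/fig2catalog.sh -l -r "$rot" -t "$exp_t" -x 15 "$exp" ./trash1.png
+#  sh ~/scripts/fig2catalog.sh -r "$rot" -t "$bias_t" -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' "$bias" ./trash3.png
+#  montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 "$out"
+#}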
+
+
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge.png ./formatted/ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_blocking.png ./formatted/ECMWF-S4_summary_bias_transition_prob_blocking.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO+.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO+.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO-.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO-.png
+
+sh ~/scripts/fig2catalog.sh -m 200 -r 140 -s 0.7 -t 'ERA-Interim / sea level pressure / regime anomalies\nJanuary to December / 1981-2015' -x 15 -c 'Clustering: monthly k-means clustering \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E)' ERA-Interim_monthly_regime_anomalies.png ./formatted/ERA-Interim_monthly_regime_anomalies.png
+
+sh ~/scripts/fig2catalog.sh -m 200 -r 140 -s 0.7 -t 'ERA-Interim / sea level pressure / regime anomalies\nJanuary to December / 1981-2015' -x 15 -c 'Clustering: 3-month running k-means clustering \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E)' ERA-Interim_3-months_regime_anomalies.png ./formatted/ERA-Interim_3-months_regime_anomalies.png
+
+# Taylor diagram:
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / Taylor diagram of the regime anomalies \nJanuary to December / 1981-2015' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim DJF regime anomalies' ERA-Interim_regime_anomalies_DJF_Taylor_Diagram.png ./formatted/ERA-Interim_regime_anomalies_DJF_Taylor_Diagram.png
+
+
+#########################################################################
+#                          Subseasonal system                           #
+#########################################################################
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores \n January to December / 1994-2013 ' -c 'Region: global \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_World.png formatted/Summary_sfcWind_World.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over Europe \n January to December / 1994-2013 ' -c 'Region: Europe (15°W-45°E, 35°N-75°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_Europe.png formatted/Summary_sfcWind_Europe.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over Iberian Peninsula \n January to December / 1994-2013 ' -c 'Region: Iberian Peninsula (10°W-4°E, 36°N-44°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_Iberian_Peninsula.png formatted/Summary_sfcWind_Iberian_Peninsula.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over North Sea \n January to December / 1994-2013 ' -c 'Region: North Sea (4°W-15°E, 50°N-65°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_North_Sea.png formatted/Summary_sfcWind_North_Sea.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over North America \n January to December / 1994-2013 ' -c 'Region: North America (130°W-60°W, 30°N-50°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_North_America.png formatted/Summary_sfcWind_North_America.png
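+
+# The four regional Summary_sfcWind_* calls above differ only in the region
+# label and its bounding box, so they could also be generated from a small
+# lookup list (a commented-out sketch; the name|box pairs simply restate the
+# regions used above, and the global panel would keep its own call):
+#
+#for entry in 'Europe|Europe (15°W-45°E, 35°N-75°N)' \
+#             'Iberian_Peninsula|Iberian Peninsula (10°W-4°E, 36°N-44°N)' \
+#             'North_Sea|North Sea (4°W-15°E, 50°N-65°N)' \
+#             'North_America|North America (130°W-60°W, 30°N-50°N)'; do
+#  name=${entry%%|*}; box=${entry#*|}
+#  ~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 \
+#    -t "ECMWF-MPS / 10m wind speed / Skill Scores over ${name//_/ } \n January to December / 1994-2013 " \
+#    -c "Region: ${box} \nReference dataset: ERA-Interim\nBias correction: none" \
+#    "Summary_sfcWind_${name}.png" "formatted/Summary_sfcWind_${name}.png"
+#done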
+
+
+# Reliability Diagrams:
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: global \nReference dataset: ERA-Interim' Summary_RelDiagr_World.png formatted/Summary_RelDiagr_World.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over Europe \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: Europe (15°W-45°E, 35°N-75°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_Europe.png formatted/Summary_RelDiagr_Europe.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over Iberian Peninsula \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: Iberian Peninsula (10°W-4°E, 36°N-44°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_Iberian_Peninsula.png formatted/Summary_RelDiagr_Iberian_Peninsula.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over North Sea \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: North Sea (4°W-15°E, 50°N-65°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_North_Sea.png formatted/Summary_RelDiagr_North_Sea.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over North America \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: North America (130°W-60°W, 30°N-50°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_North_America.png formatted/Summary_RelDiagr_North_America.png
+
+######################################################################
+#                           Seasonal system                          #
+######################################################################
+
+# sfcWind:
+for f in $( ls *nEU.* ); do mv $f 1_Europe/; done
+for f in $( ls *seEU.* ); do mv $f 1_Europe/; done
+for f in $( ls *swEU.* ); do mv $f 1_Europe/; done
+for f in $( ls *AF.* ); do mv $f 2_Africa/; done
+for f in $( ls *eNA.* ); do mv $f 3_North_America/; done
+for f in $( ls *wNA.* ); do mv $f 3_North_America/; done
+for f in $( ls *mNA.* ); do mv $f 3_North_America/; done
+for f in $( ls *eCA.* ); do mv $f 4_Central_America/; done
+for f in $( ls *wCA.* ); do mv $f 4_Central_America/; done
+for f in $( ls *eSA.* ); do mv $f 5_South_America/; done
+for f in $( ls *wSA.* ); do mv $f 5_South_America/; done
+for f in $( ls *sSA.* ); do mv $f 5_South_America/; done
+for f in $( ls *IN.* ); do mv $f 6_Asia/; done
+for f in $( ls *nEA.* ); do mv $f 6_Asia/; done
+for f in $( ls *sEA.* ); do mv $f 6_Asia/; done
+for f in $( ls *eAU.* ); do mv $f 7_Oceania/; done
+for f in $( ls *wAU.* ); do mv $f 7_Oceania/; done
+
+# tas:
+for f in $( ls *EU.* ); do mv $f 1_Europe/; done
+for f in $( ls *AF.* ); do mv $f 2_Africa/; done
+for f in $( ls *NA.* ); do mv $f 3_North_America/; done
+for f in $( ls *SA.* ); do mv $f 5_South_America/; done
+for f in $( ls *EA.* ); do mv $f 6_Asia/; done
+for f in $( ls *AU.* ); do mv $f 7_Oceania/; done
+for f in $( ls *NH.* ); do mv $f 8_Northern_Hemisphere/; done
+for f in $( ls *SH.* ); do mv $f 9_Southern_Hemisphere/; done
+for f in $( ls *TR.* ); do mv $f 10_Tropics/; done
+for f in $( ls *GL.* ); do mv $f 11_Global/; done
+
+for f in $( ls *_rawdata_*.* ); do mv $f 1_raw/; done
+for f in $( ls *_sbc_*.* ); do mv $f 2_sbc/; done
+for f in $( ls *_cal_*.* ); do mv $f 3_cal/; done
+
+for f in $( ls *nEU.* ); do mv $f 1_nEU/; done
+for f in $( ls *swEU.* ); do mv $f 2_swEU/; done
+for f in $( ls *seEU.* ); do mv $f 3_seEU/; done
+
+for f in $( ls *eNA.* ); do mv $f 1_eNA/; done
+for f in $( ls *mNA.* ); do mv $f 2_mNA/; done
+for f in $( ls *wNA.* ); do mv $f 3_wNA/; done
+
+for f in $( ls *eCA.* ); do mv $f 1_eCA/; done
+for f in $( ls *wCA.* ); do mv $f 2_wCA/; done
+
+for f in $( ls *eSA.* ); do mv $f 1_eSA/; done
+for f in $( ls *sSA.* ); do mv $f 2_sSA/; done
+for f in $( ls *wSA.* ); do mv $f 3_wSA/; done
+
+for f in $( ls *IN.* ); do mv $f 1_IN/; done
+for f in $( ls *nEA.* ); do mv $f 2_nEA/; done
+for f in $( ls *sEA.* ); do mv $f 3_sEA/; done
+
+for f in $( ls *eAU.* ); do mv $f 1_eAU/; done
+for f in $( ls *wAU.* ); do mv $f 2_wAU/; done
+
+for f in $( ls *_cross_*.* ); do mv $f 1_cross/; done
+for f in $( ls *_nocross_*.* ); do mv $f 2_nocross/; done
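+
+# Note on the loops above: iterating over `$( ls ... )` word-splits filenames
+# and still invokes mv when nothing matches. A plain glob with nullglob is the
+# safer equivalent; e.g. for the European sfcWind files (sketch only, not run):
+#
+#shopt -s nullglob
+#for f in *nEU.* *seEU.* *swEU.*; do mv "$f" 1_Europe/; done
+#shopt -u nullglob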
+
+
+mv sfcWind_1_ERAI_MMA_1_Above_RelDiag_rawdata_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_1_Above_RelDiag_rawdata_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_1_Below_RelDiag_rawdata_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_1_Below_RelDiag_rawdata_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_1_Near_RelDiag_rawdata_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_1_Near_RelDiag_rawdata_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_2_Above_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_2_Below_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_2_Near_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_3_Above_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_3_Below_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_3_Near_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_4_Above_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_4_Above_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_4_Below_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_4_Below_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_4_Near_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_4_Near_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_5_Above_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_5_Below_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_5_Near_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png
+
+
+for f in $( ls *_MAM_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_JJA_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_SON_*.* ); do mv "$f" "3_$f"; done
+for f in $( ls *_DJF_*.* ); do mv "$f" "4_$f"; done
+for f in $( ls *_ERAI_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_JRA_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_ERAI2_*.* ); do mv "$f" "3_$f"; done
+for f in $( ls *_JRA2_*.* ); do mv "$f" "4_$f"; done
+for f in $( ls *_Above_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_Near_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_Below_*.* ); do mv "$f" "3_$f"; done
+
+
+for f in $( ls *_MAM_*.* ); do mv "$f" "1_tas_$f"; done
+for f in $( ls *_JJA_*.* ); do mv "$f" "2_tas_$f"; done
+for f in $( ls *_SON_*.* ); do mv "$f" "3_tas_$f"; done
+for f in $( ls *_DJF_*.* ); do mv "$f" "4_tas_$f"; done
+for f in $( ls *_ERAI_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_JRA_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_ERAI2_*.* ); do mv "$f" "3_$f"; done
+for f in $( ls *_JRA2_*.* ); do mv "$f" "4_$f"; done
+for f in $( ls *_Above_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_Near_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_Below_*.* ); do mv "$f" "3_$f"; done
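+
+
+# The _MMA_ renames above fix a season-label typo by hand, and the long mv
+# lists below strip the duplicated "N_tas_" prefixes apparently left by
+# repeated runs of the prefix loops. Both could be written generically with
+# bash parameter expansion (a commented-out sketch; the globs are inferred
+# from the filenames below):
+#
+#for f in *_MMA_*; do mv "$f" "${f/_MMA_/_MAM_}"; done
+#for f in [123]_tas_[1-4]_tas_*; do g=${f/_tas_/_}; mv "$f" "${g/_tas_/_}"; done
+
+
+mv 1_tas_1_tas_1_tas_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 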
1_1_1_tas_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 
2_tas_3_tas_2_tas_3_ERAI2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_2_tas_1_tas_2_JRA_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_2_tas_2_tas_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png + + + + + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 
1_1_2_tas_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 
2_3_2_tas_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_1_tas_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_2_tas_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png + + + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 
1_tas_1_tas_2_tas_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 
2_3_1_tas_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_1_tas_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_2_tas_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 
3_4_4_tas_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png + + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 
2_2_4_tas_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_1_tas_2_JRA_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_2_tas_2_JRA_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 
3_4_4_tas_4_JRA2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 
2_tas_2_tas_4_tas_2_JRA_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_1_tas_2_JRA_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_2_tas_2_JRA_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 
3_tas_4_tas_3_tas_4_JRA2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png + + + + + + + + + + + + + +# Europe tas: + + + +mv 1_1_1_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_2_3_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_3_2_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 
2_3_1_3_ERAI2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_1_tas_4_JRA2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_4_tas_4_JRA2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_1_4_1_ERAI_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_2_3_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_3_3_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_2_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png + + + + + +mv 1_1_1_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 
1_1_3_tas_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_3_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_3_2_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_3_1_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 
2_4_1_tas_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_4_4_tas_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_1_4_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_3_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_3_3_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_2_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png + + + +mv 1_1_1_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 
1_2_3_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_3_2_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_3_1_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_4_1_tas_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 
2_4_4_tas_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_1_4_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_3_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_3_3_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_4_2_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png + + +mv 1_1_1_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_3_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 
1_3_2_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_3_1_3_ERAI2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_4_1_tas_4_JRA2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png 2_4_4_tas_4_JRA2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 
3_1_4_1_ERAI_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 3_2_3_2_JRA_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_3_3_3_ERAI2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_4_2_4_JRA2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_EU.png + + +mv 1_1_1_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_2_3_2_JRA_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 1_3_2_3_ERAI2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 
1_4_1_tas_4_JRA2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 2_3_1_3_ERAI2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_4_1_tas_4_JRA2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png 2_4_4_tas_4_JRA2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 3_1_4_1_ERAI_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 
3_2_3_2_JRA_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_3_3_3_ERAI2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_4_2_4_JRA2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_EU.png + +###################################################################### +# Reliability diagram composition # +###################################################################### + +var=sfcWind #sfcWind tas + +for region in GL #nEU swEU seEU #EU #AF #eNA mNA wNA # NA #eCA wCA #eSA sSA wSA # SA #IN nEA sEA # EA #eAU wAU #AU #GL #NH #SH #TR +do +echo region: $region + +for bias in raw sbc cal +do +echo bias: $bias + +case "$var" in +"sfcWind") + var2=sfcwind + var_title1="10m" + var_title2="wind speed" ;; +"tas") + var2=tas + var_title1="2m" + var_title2="temperature";; +esac + +case "$bias" in +"raw") + bias_suffix=rawdata + bias_text=none + bias_num=1 ;; +"sbc") + bias_suffix=sbc_cross + bias_text='simple_bias_correction_with_cross_validation' + bias_num=2 ;; +"cal") + bias_suffix=cal_cross + bias_text='calibration_with_cross_validation' + bias_num=4 ;; +esac + +case "$region" in +"nEU") + region_name='northern_Europe' + domain='[15W-45E,45N-75N]';; +"swEU") + region_name='southwestern_Europe' + domain='[15W-20E,35N-45N]';; +"seEU") + region_name='southeastern_Europe' + domain='[20W-45E,45N-75N]';; +"AF") + region_name='Africa' + domain='[20W-55E,35S-40N]';; +"eNA") + region_name='eastern_North_America' + domain='[86W-60W,30N-50N]';; +"mNA") + region_name='middle_North_America' + domain='[110W-85W,30N-50N]';; +"wNA") + region_name='western_North_America' + domain='[130W-110W,30N-50N]';; +"NA") + region_name='North_America' + domain='[170W-50W,10N-75N]';; +"eCA") + region_name='eastern_Central_America' + domain='[80W-60W,12.5N-30N]';; +"wCA") + region_name='western_Central_America' + domain='[110W-80W,10N-30N]';; +"eSA") + region_name='eastern_South_America' + domain='[45W-30W,25S-0S]';; +"sSA") + region_name='southern_South_America' + domain='[65W-45W,45S-25S]';; +"wSA") + region_name='western_South_America' + domain='[80W-65W,35S-0S]';; +"SA") + region_name='South_America' + domain='[90W-30W,60S-10N]';; +"IN") + region_name='India' + domain='[65E-85E,5N-35N]';; +"nEA") + region_name='northern_East_Asia' + domain='[95E-145E,35N-50N]';; +"sEA") + region_name='southern_East_Asia' + domain='[95E-145E,20N-35N]';; 
+"EA") + region_name='East_Asia' + domain='[90E-150E,20N-50N]';; +"eAU") + region_name='eastern_Australia' + domain='[135E-180E,50S-25S]';; +"wAU") + region_name='western_Australia' + domain='[110E-135E,40S-20S]';; +"AU") + region_name='Australia' + domain='[110E-180E,50S-0S]';; +"GL") + region_name='Globe' + domain='[180W-180E,90S-90N]';; +"NH") + region_name='Northern_Hemisphere' + domain='[180W-180E,20N-90N]';; +"SH") + region_name='Southern_Hemisphere' + domain='[180W-180E,90S-20S]';; +"TR") + region_name='Tropics' + domain='[180W-180E,20S-20N]';; +esac + + +for rean in ERAI JRA ERAI2 JRA2 +do +echo rean: $rean + +case "$rean" in +"ERAI") + rean_name=ERA-Interim + rean_dir=ERAI + rean_num=1 + year=1991 ;; +"JRA") + rean_name=JRA-55 + rean_dir=JRA + rean_num=2 + year=1991 ;; +"ERAI2") + rean_name=ERA-Interim + rean_dir=ERAI2 + rean_num=3 + year=1981 ;; +"JRA2") + rean_name=JRA-55 + rean_dir=JRA2 + rean_num=4 + year=1981;; +esac + +# only cut title and caption from single figures: +#~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_1_${var}_${rean_num}_${rean}_MAM_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash2.png +#~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_2_${var}_${rean_num}_${rean}_JJA_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash3.png +#~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_3_${var}_${rean_num}_${rean}_SON_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash4.png +#~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_4_${var}_${rean_num}_${rean}_DJF_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash5.png + +echo tercile: Above + +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/MAM/${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash2.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/JJA/${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash3.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/SON/${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash4.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/DJF/${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash5.png +height_figure=$(identify -ping -format %h trash2.png) + +#convert -background white -size ${height_figure}x -pointsize 50 -gravity center label:"Above Normal" trash1.png +convert -background white -size 500x1800 -pointsize 150 -gravity center label:"Above \nNormal" trash1.png + +montage trash1.png trash2.png trash3.png trash4.png trash5.png -tile 5x1 -geometry +0+0 trash6.png + +# remove a bug that makes the new text repeat twice: +~/scripts/fig2catalog.sh -l -m 30 trash6.png trash6.png + +# repeat with next tercile: +echo tercile: Near + +# only cut title and caption from single figures: +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/MAM/${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash8.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/JJA/${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash9.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/SON/${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash10.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/DJF/${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash11.png + +convert -background white -size 
500x1800 -pointsize 150 -gravity center label:"Near \nNormal" trash7.png +montage trash7.png trash8.png trash9.png trash10.png trash11.png -tile 5x1 -geometry +0+0 trash12.png +~/scripts/fig2catalog.sh -l -m 30 trash12.png trash12.png + +# repeat with next tercile: +echo tercile: Below + +# only cut title and caption from single figures: +#~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_1_${var}_${rean_num}_${rean}_MAM_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash14.png +#~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_2_${var}_${rean_num}_${rean}_JJA_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash15.png +#~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_3_${var}_${rean_num}_${rean}_SON_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash16.png +#~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_4_${var}_${rean_num}_${rean}_DJF_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash17.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/MAM/${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash14.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/JJA/${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash15.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/SON/${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash16.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 ./${rean_dir}/DJF/${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash17.png + +convert -background white -size 500x1800 -pointsize 150 -gravity center label:"Below \nNormal" trash13.png +montage trash13.png trash14.png trash15.png trash16.png trash17.png -tile 5x1 -geometry +0+0 trash18.png +~/scripts/fig2catalog.sh -l -m 30 trash18.png trash18.png + +montage trash6.png trash12.png trash18.png -tile 1x3 -geometry +0+0 trash19.png + +width_figure=$(identify -ping -format %w trash19.png) +convert -background white -size 400x200 -pointsize 150 -gravity center label:" " trash20.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"MAM" trash21.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"JJA" trash22.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"SON" trash23.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"DJF" trash24.png + +montage trash20.png trash21.png trash22.png trash23.png trash24.png -tile 5x1 -geometry +0+0 trash25.png +~/scripts/fig2catalog.sh -l -m 30 trash25.png trash25.png + +montage trash25.png trash19.png -tile 1x2 -geometry +0+0 trash26.png + +~/scripts/fig2catalog.sh -s 0.9 -p 100 -x 50 -t 'Multiple Systems / '${var}' / Reliability Diagram \nAll seasons / '${year}'-2012' -c 'Start dates: 1st February (MAM), 1st May (JJA), 1st August (SON) or 1st November (DJF)\nReference dataset: '${rean_name}'\nBias correction: '${bias_text}' \nRegion: '${region_name}' '${domain}'\nMask: sea depth below 50m' trash26.png composition_${var}_${rean}_${bias_suffix}_${year}_${region}.png + +~/scripts/fig2catalog.sh -s 0.9 -r 450 -m 760 -t 'Multiple Systems / '${var_title1}' '${var_title2}' / Reliability Diagram \nAll seasons / '${year}'-2012' -c 'Start dates: 1st February (MAM), 1st May (JJA), 1st August (SON) or 1st November (DJF)\nReference dataset: '${rean_name}'\nBias correction: '${bias_text}' \nRegion: 
'${region_name}' '${domain}'\nMask: sea depth below 50m' trash26.png composition_${var}_${rean}_${bias_suffix}_${year}_${region}.png + + +rm trash*.* + +done +done +done + + + + + + + + + + +# Fix the titles of the temperature reliability diagrams, which still said "10m wind speed" instead of "temperature": + +var='tas' + +for region in GL NH SH TR EU AF NA SA EA AU +do + +for bias in raw sbc cal +do + +case "$var" in +"sfcWind") + var2=sfcwind + var_title1="10m" + var_title2="wind speed" ;; + +"tas") + var2=tas + var_title1="2m" + var_title2="temperature";; +esac + +case "$bias" in +"raw") + bias_suffix=rawdata + bias_text=none + bias_num=1 ;; +"sbc") + bias_suffix=sbc_cross + bias_text='simple_bias_correction_with_cross_validation' + bias_num=2 ;; +"cal") + bias_suffix=cal_cross + bias_text='calibration_with_cross_validation' + bias_num=4 ;; +esac + +case "$region" in +"nEU") + region_name='northern_Europe' + domain='[15W-45E,45N-75N]';; +"swEU") + region_name='southwestern_Europe' + domain='[15W-20E,35N-45N]';; +"seEU") + region_name='southeastern_Europe' + domain='[20W-45E,45N-75N]';; +"AF") + region_name='Africa' + domain='[20W-55E,35S-40N]';; +"eNA") + region_name='eastern_North_America' + domain='[86W-60W,30N-50N]';; +"mNA") + region_name='middle_North_America' + domain='[110W-85W,30N-50N]';; +"wNA") + region_name='western_North_America' + domain='[130W-110W,30N-50N]';; +"NA") + region_name='North_America' + domain='[170W-50W,10N-75N]';; +"eCA") + region_name='eastern_Central_America' + domain='[80W-60W,12.5N-30N]';; +"wCA") + region_name='western_Central_America' + domain='[110W-80W,10N-30N]';; +"eSA") + region_name='eastern_South_America' + domain='[45W-30W,25S-0S]';; +"sSA") + region_name='southern_South_America' + domain='[65W-45W,45S-25S]';; +"wSA") + region_name='western_South_America' + domain='[80W-65W,35S-0S]';; +"SA") + region_name='South_America' + domain='[90W-30W,60S-10N]';; +"IN") + region_name='India' + domain='[65E-85E,5N-35N]';; +"nEA") + region_name='northern_East_Asia' + domain='[95E-145E,35N-50N]';; +"sEA") + region_name='southern_East_Asia' + domain='[95E-145E,20N-35N]';; +"EA") + region_name='East_Asia' + domain='[90E-150E,20N-50N]';; +"eAU") + region_name='eastern_Australia' + domain='[135E-180E,50S-25S]';; +"wAU") + region_name='western_Australia' + domain='[110E-135E,40S-20S]';; +"AU") + region_name='Australia' + domain='[110E-180E,50S-0S]';; +"GL") + region_name='Globe' + domain='[180W-180E,90S-90N]';; +"NH") + region_name='Northern_Hemisphere' + domain='[180W-180E,20N-90N]';; +"SH") + region_name='Southern_Hemisphere' + domain='[180W-180E,90S-20S]';; +"TR") + region_name='Tropics' + domain='[180W-180E,20S-20N]';; +esac + +for rean in ERAI JRA ERAI2 JRA2 +do + +case "$rean" in +"ERAI") + rean_name=ERA-Interim + rean_num=1 + year=1991 ;; +"JRA") + rean_name=JRA-55 + rean_num=2 + year=1991 ;; +"ERAI2") + rean_name=ERA-Interim + rean_num=3 + year=1981 ;; +"JRA2") + rean_name=JRA-55 + rean_num=4 + year=1981;; +esac + +~/scripts/fig2catalog.sh -s 0.9 -r 450 -m 760 -t 'Multiple Systems / '${var_title1}' '${var_title2}' / Reliability Diagram \nAll seasons / '${year}'-2012' -c 'Start dates: 1st February (MAM), 1st May (JJA), 1st August (SON) or 1st November (DJF)\nReference dataset: '${rean_name}'\nBias correction: '${bias_text}' \nRegion: '${region_name}' '${domain}'\nMask: sea depth below 50m' composition_${var}_${rean}_${bias_suffix}_${year}_${region}.png composition_${var}_${rean}_${bias_suffix}_${year}_${region}_fixed.png + +done +done +done + + + + + + diff --git 
a/weather_regimes_titles_catalogue.sh~ b/weather_regimes_titles_catalogue.sh~ new file mode 100644 index 0000000000000000000000000000000000000000..242171e179b38dee9bc04e1945b584e960901f73 --- /dev/null +++ b/weather_regimes_titles_catalogue.sh~ @@ -0,0 +1,2050 @@ +################################################################################### +# Single images # +################################################################################### + +# regime anomalies: +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO+_anomalies.png ./formatted/ERA-Interim_Winter_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO-_anomalies.png ./formatted/ERA-Interim_Winter_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking anomalies\nDJF / 1979-2013' ERA-Interim_Winter_blocking_anomalies.png ./formatted/ERA-Interim_Winter_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nDJF / 1979-2013' ERA-Interim_Winter_atlantic_anomalies.png ./formatted/ERA-Interim_Winter_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO+_anomalies.png ./formatted/ERA-Interim_Spring_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO-_anomalies.png ./formatted/ERA-Interim_Spring_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking anomalies\nMAM / 1979-2013' ERA-Interim_Spring_blocking_anomalies.png ./formatted/ERA-Interim_Spring_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nMAM / 1979-2013' ERA-Interim_Spring_atlantic_anomalies.png ./formatted/ERA-Interim_Spring_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO+_anomalies.png ./formatted/ERA-Interim_Summer_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO-_anomalies.png ./formatted/ERA-Interim_Summer_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking anomalies\nJJA / 1979-2013' ERA-Interim_Summer_blocking_anomalies.png ./formatted/ERA-Interim_Summer_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nJJA / 1979-2013' ERA-Interim_Summer_atlantic_anomalies.png ./formatted/ERA-Interim_Summer_atlantic_anomalies.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO+ anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO+_anomalies.png ./formatted/ERA-Interim_Autumn_NAO+_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / NAO- anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO-_anomalies.png ./formatted/ERA-Interim_Autumn_NAO-_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Blocking anomalies\nSON / 1979-2013' ERA-Interim_Autumn_blocking_anomalies.png ./formatted/ERA-Interim_Autumn_blocking_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / Atlantic ridge anomalies\nSON / 1979-2013' 
ERA-Interim_Autumn_atlantic_anomalies.png ./formatted/ERA-Interim_Autumn_atlantic_anomalies.png + + +# regime frequency: + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO+_freq.png ./formatted/ERA-Interim_Winter_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nDJF / 1979-2013' ERA-Interim_Winter_NAO-_freq.png ./formatted/ERA-Interim_Winter_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nDJF / 1979-2013' ERA-Interim_Winter_blocking_freq.png ./formatted/ERA-Interim_Winter_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nDJF / 1979-2013' ERA-Interim_Winter_atlantic_freq.png ./formatted/ERA-Interim_Winter_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO+_freq.png ./formatted/ERA-Interim_Spring_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nMAM / 1979-2013' ERA-Interim_Spring_NAO-_freq.png ./formatted/ERA-Interim_Spring_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nMAM / 1979-2013' ERA-Interim_Spring_blocking_freq.png ./formatted/ERA-Interim_Spring_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nMAM / 1979-2013' ERA-Interim_Spring_atlantic_freq.png ./formatted/ERA-Interim_Spring_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO+_freq.png ./formatted/ERA-Interim_Summer_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nJJA / 1979-2013' ERA-Interim_Summer_NAO-_freq.png ./formatted/ERA-Interim_Summer_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nJJA / 1979-2013' ERA-Interim_Summer_blocking_freq.png ./formatted/ERA-Interim_Summer_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nJJA / 1979-2013' ERA-Interim_Summer_atlantic_freq.png ./formatted/ERA-Interim_Summer_atlantic_freq.png + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO+ anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO+_freq.png ./formatted/ERA-Interim_Autumn_NAO+_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / NAO- anomalies\nSON / 1979-2013' ERA-Interim_Autumn_NAO-_freq.png ./formatted/ERA-Interim_Autumn_NAO-_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Blocking anomalies\nSON / 1979-2013' ERA-Interim_Autumn_blocking_freq.png ./formatted/ERA-Interim_Autumn_blocking_freq.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes / Atlantic ridge anomalies\nSON / 1979-2013' ERA-Interim_Autumn_atlantic_freq.png ./formatted/ERA-Interim_Autumn_atlantic_freq.png + + +# impact on sfcWind: + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Winter_sfcWind_NAO+_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Winter_sfcWind_NAO-_impact.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking 
impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Winter_sfcWind_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nDJF / 1979-2013' ERA-Interim_Winter_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Winter_sfcWind_atlantic_impact.png
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Spring_sfcWind_NAO+_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Spring_sfcWind_NAO-_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Spring_sfcWind_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nMAM / 1979-2013' ERA-Interim_Spring_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Spring_sfcWind_atlantic_impact.png
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Summer_sfcWind_NAO+_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Summer_sfcWind_NAO-_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Summer_sfcWind_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nJJA / 1979-2013' ERA-Interim_Summer_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Summer_sfcWind_atlantic_impact.png
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO+ impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_NAO+_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_NAO+_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / NAO- impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_NAO-_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_NAO-_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Blocking impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_blocking_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 10m wind speed / Atlantic ridge impact\nSON / 1979-2013' ERA-Interim_Autumn_sfcWind_atlantic_impact.png ./formatted/ERA-Interim_Autumn_sfcWind_atlantic_impact.png
+
+# impact on tas:
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_NAO+_impact.png ./formatted/ERA-Interim_Winter_tas_NAO+_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO- impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_NAO-_impact.png ./formatted/ERA-Interim_Winter_tas_NAO-_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_blocking_impact.png ./formatted/ERA-Interim_Winter_tas_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nDJF / 1979-2013' ERA-Interim_Winter_tas_atlantic_impact.png ./formatted/ERA-Interim_Winter_tas_atlantic_impact.png
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_NAO+_impact.png ./formatted/ERA-Interim_Spring_tas_NAO+_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO- impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_NAO-_impact.png ./formatted/ERA-Interim_Spring_tas_NAO-_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_blocking_impact.png ./formatted/ERA-Interim_Spring_tas_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nMAM / 1979-2013' ERA-Interim_Spring_tas_atlantic_impact.png ./formatted/ERA-Interim_Spring_tas_atlantic_impact.png
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_NAO+_impact.png ./formatted/ERA-Interim_Summer_tas_NAO+_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO- impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_NAO-_impact.png ./formatted/ERA-Interim_Summer_tas_NAO-_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_blocking_impact.png ./formatted/ERA-Interim_Summer_tas_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nJJA / 1979-2013' ERA-Interim_Summer_tas_atlantic_impact.png ./formatted/ERA-Interim_Summer_tas_atlantic_impact.png
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO+ impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_NAO+_impact.png ./formatted/ERA-Interim_Autumn_tas_NAO+_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / NAO- impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_NAO-_impact.png ./formatted/ERA-Interim_Autumn_tas_NAO-_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Blocking impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_blocking_impact.png ./formatted/ERA-Interim_Autumn_tas_blocking_impact.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / 2m temperature / Atlantic ridge impact\nSON / 1979-2013' ERA-Interim_Autumn_tas_atlantic_impact.png ./formatted/ERA-Interim_Autumn_tas_atlantic_impact.png
+
+
+#############################################################
+#                          JRA-55                            #
+#############################################################
+
+# The four JRA-55 catalogue sections repeat the ERA-Interim pattern above for
+# every season and regime, so they are generated with a small helper.
+# $1: variable name used in the title; $2: file-name infix ('', 'sfcWind_' or 'tas_');
+# $3: file-name kind (anomalies, freq or impact); $4: kind word used in the title.
+jra_catalog() {
+    for season in Winter:DJF Spring:MAM Summer:JJA Autumn:SON; do
+        name=${season%:*}; code=${season#*:}
+        for regime in NAO+ NAO- blocking atlantic; do
+            case $regime in
+                blocking) label='Blocking' ;;
+                atlantic) label='Atlantic ridge' ;;
+                *)        label=$regime ;;
+            esac
+            f="JRA-55_${name}_$2${regime}_$3.png"
+            sh ~/scripts/fig2catalog.sh -t "JRA-55 / $1 / ${label} $4\n${code} / 1979-2013" "$f" "./formatted/$f"
+        done
+    done
+}
+
+# regime anomalies:
+jra_catalog 'sea level pressure' '' anomalies anomalies
+
+# regime frequency:
+jra_catalog 'weather regimes' '' freq anomalies
+
+# impact on sfcWind:
+jra_catalog '10m wind speed' sfcWind_ impact impact
+
+# impact on tas:
+jra_catalog '2m temperature' tas_ impact impact
+
+
+#########################################################################################
+#                                 Seasonal composition                                  #
+#########################################################################################
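+
+# The composition pass below writes into ./2x2 and the title pass into
+# ./2x2_formatted; ImageMagick's montage does not create missing output
+# directories, so a guard is sketched here (a no-op when they already exist):
+mkdir -p ./2x2 ./2x2_formatted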
+# Every 2x2 seasonal composition repeats the same convert/montage sequence,
+# so it is wrapped in a function. $1 is the base file name (e.g.
+# ERA-Interim_Winter or ERA-Interim_Winter_sfcWind) and $2 the figure kind
+# (anomalies, freq or impact): the four input panels are ${1}_<regime>_${2}.png
+# and the result is written to ./2x2/${1}_${2}.png. All four panels are
+# assumed to share the width of the NAO+ panel.
+compose_2x2() {
+    width_figure=$(identify -ping -format %w "${1}_NAO+_${2}.png")
+    # top row: NAO+ and NAO- panels, each with a centered title strip on top
+    convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png
+    montage trash1.png "${1}_NAO+_${2}.png" -tile 1x2 -geometry +0+0 trash2.png
+    convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png
+    montage trash3.png "${1}_NAO-_${2}.png" -tile 1x2 -geometry +0+0 trash4.png
+    montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+    # 30-px white spacers above and below the top row
+    convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png
+    montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png
+    convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png
+    montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png
+    # bottom row: Blocking and Atlantic Ridge panels, with a 5-px spacer below
+    convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png
+    montage trash1.png "${1}_blocking_${2}.png" -tile 1x2 -geometry +0+0 trash2.png
+    convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png
+    montage trash3.png "${1}_atlantic_${2}.png" -tile 1x2 -geometry +0+0 trash4.png
+    montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png
+    convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png
+    montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png
+    # stack the two labelled rows into the final composition
+    montage trash9.png trash10.png -tile 1x2 -geometry +0+0 "./2x2/${1}_${2}.png"
+}
+
+# regime anomalies:
+for season in Winter Spring Summer Autumn; do compose_2x2 ERA-Interim_${season} anomalies; done
+
+# regime frequency:
+for season in Winter Spring Summer Autumn; do compose_2x2 ERA-Interim_${season} freq; done
+
+# regime impact on sfcWind:
+for season in Winter Spring Summer Autumn; do compose_2x2 ERA-Interim_${season}_sfcWind impact; done
+
+# regime impact on temperature:
+for season in Winter Spring Summer Autumn; do compose_2x2 ERA-Interim_${season}_tas impact; done
+
+
+##################
+# Same for JRA: #
+##################
+
+# regime anomalies:
+for season in Winter Spring Summer Autumn; do compose_2x2 JRA-55_${season} anomalies; done
+
+# regime frequency:
+for season in Winter Spring Summer Autumn; do compose_2x2 JRA-55_${season} freq; done
+
+# regime impact on sfcWind:
+for season in Winter Spring Summer Autumn; do compose_2x2 JRA-55_${season}_sfcWind impact; done
+
+# regime impact on temperature:
+for season in Winter Spring Summer Autumn; do compose_2x2 JRA-55_${season}_tas impact; done
-background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png JRA-55_Summer_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png JRA-55_Summer_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png JRA-55_Summer_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png JRA-55_Summer_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Summer_tas_impact.png + +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO+" trash1.png +montage trash1.png JRA-55_Autumn_tas_NAO+_impact.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"NAO-" trash3.png +montage trash3.png JRA-55_Autumn_tas_NAO-_impact.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash6.png +montage trash6.png trash5.png -tile 1x2 -geometry +0+0 trash7.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x30 -gravity center pango:"" trash8.png +montage trash7.png trash8.png -tile 1x2 -geometry +0+0 trash9.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Blocking" trash1.png +montage trash1.png JRA-55_Autumn_tas_blocking_impact.png -tile 1x2 -geometry +0+0 trash2.png +convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"Atlantic Ridge" trash3.png +montage trash3.png JRA-55_Autumn_tas_atlantic_impact.png -tile 1x2 -geometry +0+0 trash4.png +montage trash2.png trash4.png -tile 2x1 -geometry +0+0 trash5.png +convert -background white -font Arial -pointsize 1 -size ${width_figure}x5 -gravity center pango:"" trash8.png +montage trash5.png trash8.png -tile 1x2 -geometry +0+0 trash10.png +montage trash9.png trash10.png -tile 1x2 -geometry +0+0 ./2x2/JRA-55_Autumn_tas_impact.png + +########## +# titles # +########## + +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Winter_anomalies.png ./2x2_formatted/ERA-Interim_Winter_anomalies.png +sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea 
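+# NOTE: the convert/montage sequence above repeats verbatim for every regime, season and
+# variable. A helper for one labelled column could remove most of the duplication; kept as a
+# commented-out sketch (not called anywhere; assumes ImageMagick and width_figure as above):
+#label_column () {  # usage: label_column "NAO+" JRA-55_Winter_tas_NAO+_impact.png trash2.png
+#  convert -background white -font Arial -pointsize 50 -size ${width_figure}x -gravity center pango:"$1" trash_label.png
+#  montage trash_label.png "$2" -tile 1x2 -geometry +0+0 "$3"
+#}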
+
+##########
+# titles #
+##########
+
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Winter_anomalies.png ./2x2_formatted/ERA-Interim_Winter_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Spring_anomalies.png ./2x2_formatted/ERA-Interim_Spring_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Summer_anomalies.png ./2x2_formatted/ERA-Interim_Summer_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / sea level pressure / regime anomalies\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Autumn_anomalies.png ./2x2_formatted/ERA-Interim_Autumn_anomalies.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Winter_freq.png ./2x2_formatted/ERA-Interim_Winter_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nMAM / 1979-2013' -c 'Black Line: significant trend at 95% confidence level with Mann-Kendall test \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Spring_freq.png ./2x2_formatted/ERA-Interim_Spring_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nJJA / 1979-2013' -c 'Black Line: significant trend at 95% confidence level with Mann-Kendall test \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Summer_freq.png ./2x2_formatted/ERA-Interim_Summer_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / weather regimes frequency\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/ERA-Interim_Autumn_freq.png ./2x2_formatted/ERA-Interim_Autumn_freq.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Winter_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Winter_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Spring_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Spring_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Summer_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Summer_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 10m wind speed / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Autumn_sfcWind_impact.png ./2x2_formatted/ERA-Interim_Autumn_sfcWind_impact.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Winter_tas_impact.png ./2x2_formatted/ERA-Interim_Winter_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Spring_tas_impact.png ./2x2_formatted/ERA-Interim_Spring_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Summer_tas_impact.png ./2x2_formatted/ERA-Interim_Summer_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'ERA-Interim / 2m temperature / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/ERA-Interim_Autumn_tas_impact.png ./2x2_formatted/ERA-Interim_Autumn_tas_impact.png
+
+# JRA-55:
+
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Winter_anomalies.png ./2x2_formatted/JRA-55_Winter_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Spring_anomalies.png ./2x2_formatted/JRA-55_Spring_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Summer_anomalies.png ./2x2_formatted/JRA-55_Summer_anomalies.png
+sh ~/scripts/fig2catalog.sh -t 'JRA-55 / sea level pressure / regime anomalies\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Autumn_anomalies.png ./2x2_formatted/JRA-55_Autumn_anomalies.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Winter_freq.png ./2x2_formatted/JRA-55_Winter_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Spring_freq.png ./2x2_formatted/JRA-55_Spring_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Summer_freq.png ./2x2_formatted/JRA-55_Summer_freq.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / weather regimes frequency\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/JRA-55_Autumn_freq.png ./2x2_formatted/JRA-55_Autumn_freq.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Winter_sfcWind_impact.png ./2x2_formatted/JRA-55_Winter_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Spring_sfcWind_impact.png ./2x2_formatted/JRA-55_Spring_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nJJA / 1979-2013' -c 'Black Line: significant trend at 95% confidence level with Mann-Kendall test \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Summer_sfcWind_impact.png ./2x2_formatted/JRA-55_Summer_sfcWind_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 10m wind speed / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Autumn_sfcWind_impact.png ./2x2_formatted/JRA-55_Autumn_sfcWind_impact.png
+
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nDJF / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Winter_tas_impact.png ./2x2_formatted/JRA-55_Winter_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nMAM / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Spring_tas_impact.png ./2x2_formatted/JRA-55_Spring_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nJJA / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Summer_tas_impact.png ./2x2_formatted/JRA-55_Summer_tas_impact.png
+sh ~/scripts/fig2catalog.sh -s 1.4 -t 'JRA-55 / 2m temperature / regime impact\nSON / 1979-2013' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nHatched areas: significant at 95% confidence level from a two-tailed t-test' ./2x2/JRA-55_Autumn_tas_impact.png ./2x2_formatted/JRA-55_Autumn_tas_impact.png
+
+
+rm trash*.*
+
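+# NOTE: the eight anomaly titles above differ only in dataset and season; a loop such as this
+# commented-out sketch could generate them (the Winter:DJF name/code pairs are assumptions
+# that must match the filenames produced earlier):
+#for dataset in ERA-Interim JRA-55; do
+#  for season in Winter:DJF Spring:MAM Summer:JJA Autumn:SON; do
+#    name=${season%%:*}; code=${season##*:}
+#    sh ~/scripts/fig2catalog.sh -t "${dataset} / sea level pressure / regime anomalies\n${code} / 1979-2013" -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ./2x2/${dataset}_${name}_anomalies.png ./2x2_formatted/${dataset}_${name}_anomalies.png
+#  done
+#done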
+
+
+
+
+
+###############################################################################
+#                    Validation weather regimes with S4                       #
+###############################################################################
+
+# regime anomalies:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_psl.png ./formatted/ECMWF-S4_forecast_month_1_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_psl.png ./formatted/ECMWF-S4_forecast_month_2_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_psl.png ./formatted/ECMWF-S4_forecast_month_3_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_psl.png ./formatted/ECMWF-S4_forecast_month_4_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_psl.png ./formatted/ECMWF-S4_forecast_month_5_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_psl.png ./formatted/ECMWF-S4_forecast_month_6_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_psl.png ./formatted/ECMWF-S4_forecast_month_7_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_psl.png ./formatted/ECMWF-S4_forecast_month_8_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_psl.png ./formatted/ECMWF-S4_forecast_month_9_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_psl.png ./formatted/ECMWF-S4_forecast_month_10_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_psl.png ./formatted/ECMWF-S4_forecast_month_11_psl.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / sea level pressure / regime anomalies\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_psl.png ./formatted/ECMWF-S4_forecast_month_12_psl.png
+
+# regime frequency:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_frequency.png ./formatted/ECMWF-S4_forecast_month_1_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_frequency.png ./formatted/ECMWF-S4_forecast_month_2_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_frequency.png ./formatted/ECMWF-S4_forecast_month_3_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_frequency.png ./formatted/ECMWF-S4_forecast_month_4_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_frequency.png ./formatted/ECMWF-S4_forecast_month_5_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_frequency.png ./formatted/ECMWF-S4_forecast_month_6_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_frequency.png ./formatted/ECMWF-S4_forecast_month_7_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_frequency.png ./formatted/ECMWF-S4_forecast_month_8_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_frequency.png ./formatted/ECMWF-S4_forecast_month_9_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_frequency.png ./formatted/ECMWF-S4_forecast_month_10_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_frequency.png ./formatted/ECMWF-S4_forecast_month_11_frequency.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_frequency.png ./formatted/ECMWF-S4_forecast_month_12_frequency.png
+
+# regime impact on sfcWind:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_1_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_2_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_3_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_4_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_5_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_6_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_7_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_8_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_9_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_10_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_11_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / regime impact\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_impact_sfcWind.png ./formatted/ECMWF-S4_forecast_month_12_impact_sfcWind.png
+
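+# NOTE: each caption above lists the seven start dates leading to the target month (e.g. July
+# to January for month 1). A commented-out sketch of how the "from X to Y" pair can be derived
+# instead of being typed by hand (assumes the same 6-month lead offset as in the captions):
+#months=(January February March April May June July August September October November December)
+#for m in $(seq 1 12); do
+#  first=${months[$(( (m + 5) % 12 ))]}   # six months before the target month
+#  last=${months[$(( m - 1 ))]}           # the target month itself
+#  echo "month $m: Start date: 1st of every month from $first to $last"
+#done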
+# regime impact on tas:
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nJanuary / 1981-2015' -c 'Start date: 1st of every month from July to January \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_1_impact_tas.png ./formatted/ECMWF-S4_forecast_month_1_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nFebruary / 1981-2015' -c 'Start date: 1st of every month from August to February \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_2_impact_tas.png ./formatted/ECMWF-S4_forecast_month_2_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nMarch / 1981-2015' -c 'Start date: 1st of every month from September to March \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_3_impact_tas.png ./formatted/ECMWF-S4_forecast_month_3_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nApril / 1981-2015' -c 'Start date: 1st of every month from October to April \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_4_impact_tas.png ./formatted/ECMWF-S4_forecast_month_4_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nMay / 1981-2015' -c 'Start date: 1st of every month from November to May \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_5_impact_tas.png ./formatted/ECMWF-S4_forecast_month_5_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nJune / 1981-2015' -c 'Start date: 1st of every month from December to June \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_6_impact_tas.png ./formatted/ECMWF-S4_forecast_month_6_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nJuly / 1981-2015' -c 'Start date: 1st of every month from January to July \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_7_impact_tas.png ./formatted/ECMWF-S4_forecast_month_7_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nAugust / 1981-2015' -c 'Start date: 1st of every month from February to August \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_8_impact_tas.png ./formatted/ECMWF-S4_forecast_month_8_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nSeptember / 1981-2015' -c 'Start date: 1st of every month from March to September \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_9_impact_tas.png ./formatted/ECMWF-S4_forecast_month_9_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nOctober / 1981-2015' -c 'Start date: 1st of every month from April to October \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_10_impact_tas.png ./formatted/ECMWF-S4_forecast_month_10_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nNovember / 1981-2015' -c 'Start date: 1st of every month from May to November \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_11_impact_tas.png ./formatted/ECMWF-S4_forecast_month_11_impact_tas.png
+sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / regime impact\nDecember / 1981-2015' -c 'Start date: 1st of every month from June to December \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim (last column on the right)' ECMWF-S4_forecast_month_12_impact_tas.png ./formatted/ECMWF-S4_forecast_month_12_impact_tas.png
+
+
+#####################################
+#          Summary tables:          #
+#####################################
+
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / regime anomalies spatial correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_spatial_correlations.png ./formatted/ECMWF-S4_summary_spatial_correlations.png
+sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / regime anomalies spatial correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_spatial_correlations_target_month.png ./formatted/ECMWF-S4_summary_spatial_correlations_target_month.png
+
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes frequency correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_temporal_correlations.png ./formatted/ECMWF-S4_summary_temporal_correlations.png
+sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / weather regimes frequency correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_temporal_correlations_target_month.png ./formatted/ECMWF-S4_summary_temporal_correlations_target_month.png
+
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 10m wind speed / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_sfcWind.png ./formatted/ECMWF-S4_summary_impact_sfcWind.png
+sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / 10m wind speed / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_sfcWind_target_month.png ./formatted/ECMWF-S4_summary_impact_sfcWind_target_month.png
+
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / 2m temperature / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_tas.png ./formatted/ECMWF-S4_summary_impact_tas.png
+sh ~/scripts/fig2catalog.sh -r 40 -t 'ECMWF-S4 / 2m temperature / impact correlation\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_impact_tas_target_month.png ./formatted/ECMWF-S4_summary_impact_tas_target_month.png
+
+# frequency:
+#sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_frequency_obs_target_month.png ./formatted/ECMWF-S4_summary_frequency_obs_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_frequency_target_month.png ./formatted/ECMWF-S4_summary_frequency_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'Weather regimes average frequency bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_frequency_target_month.png ./formatted/ECMWF-S4_summary_bias_frequency_target_month.png
+
+sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ERA-Interim / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_frequency_obs_target_month.png ./trash2.png
+sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ECMWF-S4 / weather regimes average frequency\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_frequency_target_month.png ./trash1.png
+sh ~/scripts/fig2catalog.sh -r 30 -t 'Weather regimes average frequency bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_frequency_target_month.png ./trash3.png
+montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_frequency_target_month.png
+
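+# NOTE: the three fig2catalog.sh calls plus montage above build one S4/observed/bias stack; the
+# persistence and transition-probability blocks below repeat the same recipe with other titles
+# and -r values. A commented-out sketch of a reusable helper (stack_summary and its argument
+# order are illustrative only; the per-panel -t titles are left out for brevity):
+#stack_summary () {  # usage: stack_summary <rot> <s4.png> <obs.png> <bias.png> <out.png>
+#  sh ~/scripts/fig2catalog.sh -l -r "$1" -x 15 "$3" ./trash2.png
+#  sh ~/scripts/fig2catalog.sh -l -r "$1" -x 15 "$2" ./trash1.png
+#  sh ~/scripts/fig2catalog.sh -r "$1" -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' "$4" ./trash3.png
+#  montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 "$5"
+#}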
+# persistence:
+#sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_persistence_obs_target_month.png ./formatted/ECMWF-S4_summary_persistence_obs_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_persistence_target_month.png ./formatted/ECMWF-S4_summary_persistence_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'Weather regimes persistence bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_persistence_target_month.png ./formatted/ECMWF-S4_summary_bias_persistence_target_month.png
+
+sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ERA-Interim / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_persistence_obs_target_month.png ./trash2.png
+sh ~/scripts/fig2catalog.sh -l -r 30 -t 'ECMWF-S4 / weather regimes persistence\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_persistence_target_month.png ./trash1.png
+sh ~/scripts/fig2catalog.sh -r 30 -t 'Weather regimes persistence bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_persistence_target_month.png ./trash3.png
+montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_persistence_target_month.png
+
+# Transition prob:
+# Atlantic ridge:
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_Atlantic_ridge_obs_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_Atlantic_ridge_obs_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_Atlantic_ridge_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_Atlantic_ridge_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge_target_month.png
+
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_Atlantic_ridge_obs_target_month.png ./trash2.png
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / Atlantic ridge transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_Atlantic_ridge_target_month.png ./trash1.png
+sh ~/scripts/fig2catalog.sh -r 47 -t 'Atlantic ridge transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge_target_month.png ./trash3.png
+montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_Atlantic_ridge_target_month.png
+
+# Blocking:
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_blocking_obs_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_blocking_obs_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_blocking_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_blocking_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_blocking_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_blocking_target_month.png
+
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_blocking_obs_target_month.png ./trash2.png
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / Blocking transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_blocking_target_month.png ./trash1.png
+sh ~/scripts/fig2catalog.sh -r 47 -t 'Blocking transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_blocking_target_month.png ./trash3.png
+montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_blocking_target_month.png
+
+
+# NAO+:
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_NAO+_obs_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO+_obs_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_NAO+_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO+_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Reference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO+_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO+_target_month.png
+
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO+_obs_target_month.png ./trash2.png
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / NAO+ transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO+_target_month.png ./trash1.png
+sh ~/scripts/fig2catalog.sh -r 47 -t 'NAO+ transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_NAO+_target_month.png ./trash3.png
+montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_NAO+_target_month.png
+
+
+# NAO-:
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_obs_NAO-_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO-_obs_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) ' ECMWF-S4_summary_transition_prob_NAO-_target_month.png ./formatted/ECMWF-S4_summary_transition_prob_NAO-_target_month.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO-_target_month.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO-_target_month.png
+
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ERA-Interim / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO-_obs_target_month.png ./trash2.png
+sh ~/scripts/fig2catalog.sh -l -r 47 -t 'ECMWF-S4 / NAO- transition probability\nJanuary to December / 1981-2015' -x 15 ECMWF-S4_summary_transition_prob_NAO-_target_month.png ./trash1.png
+sh ~/scripts/fig2catalog.sh -r 47 -t 'NAO- transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_NAO-_target_month.png ./trash3.png
+montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_NAO-_target_month.png
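+# NOTE: the four transition-probability blocks above are identical up to the regime label; a
+# loop over the filename tokens could replace them, as in this commented-out sketch (the
+# ${regime/_/ } label and the capitalisation of the titles are approximations):
+#for regime in Atlantic_ridge blocking NAO+ NAO-; do
+#  label=${regime/_/ }
+#  sh ~/scripts/fig2catalog.sh -l -r 47 -t "ERA-Interim / ${label} transition probability\nJanuary to December / 1981-2015" -x 15 ECMWF-S4_summary_transition_prob_${regime}_obs_target_month.png ./trash2.png
+#  sh ~/scripts/fig2catalog.sh -l -r 47 -t "ECMWF-S4 / ${label} transition probability\nJanuary to December / 1981-2015" -x 15 ECMWF-S4_summary_transition_prob_${regime}_target_month.png ./trash1.png
+#  sh ~/scripts/fig2catalog.sh -r 47 -t "${label} transition probability bias\nJanuary to December / 1981-2015" -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E)' ECMWF-S4_summary_bias_transition_prob_${regime}_target_month.png ./trash3.png
+#  montage trash1.png trash2.png trash3.png -tile 1x3 -geometry +0+0 ./formatted/ECMWF-S4_summary_all_transition_prob_${regime}_target_month.png
+#done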
+
+
+
+
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Atlantic ridge transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge.png ./formatted/ECMWF-S4_summary_bias_transition_prob_Atlantic_ridge.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / Blocking transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_blocking.png ./formatted/ECMWF-S4_summary_bias_transition_prob_blocking.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO+ transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO+.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO+.png
+#sh ~/scripts/fig2catalog.sh -t 'ECMWF-S4 / NAO- transition probability bias\nJanuary to December / 1981-2015' -x 15 -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim' ECMWF-S4_summary_bias_transition_prob_NAO-.png ./formatted/ECMWF-S4_summary_bias_transition_prob_NAO-.png
+
+sh ~/scripts/fig2catalog.sh -m 200 -r 140 -s 0.7 -t 'ERA-Interim / sea level pressure / regime anomalies\nJanuary to December / 1981-2015' -x 15 -c 'Clustering: monthly k-means clustering \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E)' ERA-Interim_monthly_regime_anomalies.png ./formatted/ERA-Interim_monthly_regime_anomalies.png
+
+sh ~/scripts/fig2catalog.sh -m 200 -r 140 -s 0.7 -t 'ERA-Interim / sea level pressure / regime anomalies\nJanuary to December / 1981-2015' -x 15 -c 'Clustering: 3-months running k-means clustering \nRegion: North Atlantic (27°N–81°N, 85.5°W–45°E)' ERA-Interim_3-months_regime_anomalies.png ./formatted/ERA-Interim_3-months_regime_anomalies.png
+
+# Taylor diagram:
+sh ~/scripts/fig2catalog.sh -t 'ERA-Interim / Taylor diagram of the regime anomalies \nJanuary to December / 1981-2015' -c 'Region: North Atlantic (27°N–81°N, 85.5°W–45°E) \nReference dataset: ERA-Interim DJF regime anomalies' ERA-Interim_regime_anomalies_DJF_Taylor_Diagram.png ./formatted/ERA-Interim_regime_anomalies_DJF_Taylor_Diagram.png
+
+
+#########################################################################
+#                          Subseasonal system                           #
+#########################################################################
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores \n January to December / 1994-2013 ' -c 'Region: global \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_World.png formatted/Summary_sfcWind_World.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over Europe \n January to December / 1994-2013 ' -c 'Region: Europe (15°W-45°E, 35°N-75°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_Europe.png formatted/Summary_sfcWind_Europe.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over Iberian Peninsula \n January to December / 1994-2013 ' -c 'Region: Iberian Peninsula (10°W-4°E, 36°N-44°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_Iberian_Peninsula.png formatted/Summary_sfcWind_Iberian_Peninsula.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over North Sea \n January to December / 1994-2013 ' -c 'Region: North Sea (4°W-15°E, 50°N-65°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_North_Sea.png formatted/Summary_sfcWind_North_Sea.png
+
+~/scripts/fig2catalog.sh -r 30 -p 10 -x 15 -t 'ECMWF-MPS / 10m wind speed / Skill Scores over North America \n January to December / 1994-2013 ' -c 'Region: North America (130°W-60°W, 30°N-50°N) \nReference dataset: ERA-Interim\nBias correction: none' Summary_sfcWind_North_America.png formatted/Summary_sfcWind_North_America.png
+
+
+# Reliability Diagrams:
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: global \nReference dataset: ERA-Interim' Summary_RelDiagr_World.png formatted/Summary_RelDiagr_World.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over Europe \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: Europe (15°W-45°E, 35°N-75°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_Europe.png formatted/Summary_RelDiagr_Europe.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over Iberian Peninsula \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: Iberian Peninsula (10°W-4°E, 36°N-44°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_Iberian_Peninsula.png formatted/Summary_RelDiagr_Iberian_Peninsula.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over North Sea \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: North Sea (4°W-15°E, 50°N-65°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_North_Sea.png formatted/Summary_RelDiagr_North_Sea.png
+
+~/scripts/fig2catalog.sh -s 0.8 -r 78 -t 'ECMWF-MPS / 10m wind speed / Reliability diagram over North America \n January, April, July and October / 1994-2013 ' -c 'Start dates: January, April, July and October \nLead times: 5-11, 12-18, 19-25, 26-32 days \nRegion: North America (130°W-60°W, 30°N-50°N)\nReference dataset: ERA-Interim' Summary_RelDiagr_North_America.png formatted/Summary_RelDiagr_North_America.png
+
+######################################################################
+#                           Seasonal system                          #
+######################################################################
+
+# sfcWind:
+for f in $( ls *nEU.* ); do mv "$f" 1_Europe/; done
+for f in $( ls *seEU.* ); do mv "$f" 1_Europe/; done
+for f in $( ls *swEU.* ); do mv "$f" 1_Europe/; done
+for f in $( ls *AF.* ); do mv "$f" 2_Africa/; done
+for f in $( ls *eNA.* ); do mv "$f" 3_North_America/; done
+for f in $( ls *wNA.* ); do mv "$f" 3_North_America/; done
+for f in $( ls *mNA.* ); do mv "$f" 3_North_America/; done
+for f in $( ls *eCA.* ); do mv "$f" 4_Central_America/; done
+for f in $( ls *wCA.* ); do mv "$f" 4_Central_America/; done
+for f in $( ls *eSA.* ); do mv "$f" 5_South_America/; done
+for f in $( ls *wSA.* ); do mv "$f" 5_South_America/; done
+for f in $( ls *sSA.* ); do mv "$f" 5_South_America/; done
+for f in $( ls *IN.* ); do mv "$f" 6_Asia/; done
+for f in $( ls *nEA.* ); do mv "$f" 6_Asia/; done
+for f in $( ls *sEA.* ); do mv "$f" 6_Asia/; done
+for f in $( ls *eAU.* ); do mv "$f" 7_Oceania/; done
+for f in $( ls *wAU.* ); do mv "$f" 7_Oceania/; done
+
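+# NOTE: looping over "$( ls pattern )" breaks on filenames with spaces and still runs mv when
+# nothing matches; a plain glob with nullglob is safer. Commented-out sketch of the same
+# Europe move (nullglob is a bash shell option):
+#shopt -s nullglob
+#for f in *nEU.* *seEU.* *swEU.*; do mv "$f" 1_Europe/; done
+#shopt -u nullglob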
+# tas:
+for f in $( ls *EU.* ); do mv "$f" 1_Europe/; done
+for f in $( ls *AF.* ); do mv "$f" 2_Africa/; done
+for f in $( ls *NA.* ); do mv "$f" 3_North_America/; done
+for f in $( ls *SA.* ); do mv "$f" 5_South_America/; done
+for f in $( ls *EA.* ); do mv "$f" 6_Asia/; done
+for f in $( ls *AU.* ); do mv "$f" 7_Oceania/; done
+for f in $( ls *NH.* ); do mv "$f" 8_Northern_Hemisphere/; done
+for f in $( ls *SH.* ); do mv "$f" 9_Southern_Hemisphere/; done
+for f in $( ls *TR.* ); do mv "$f" 10_Tropics/; done
+for f in $( ls *GL.* ); do mv "$f" 11_Global/; done
+
+for f in $( ls *_rawdata_*.* ); do mv "$f" 1_raw/; done
+for f in $( ls *_sbc_*.* ); do mv "$f" 2_sbc/; done
+for f in $( ls *_cal_*.* ); do mv "$f" 3_cal/; done
+
+for f in $( ls *nEU.* ); do mv "$f" 1_nEU/; done
+for f in $( ls *swEU.* ); do mv "$f" 2_swEU/; done
+for f in $( ls *seEU.* ); do mv "$f" 3_seEU/; done
+
+for f in $( ls *eNA.* ); do mv "$f" 1_eNA/; done
+for f in $( ls *mNA.* ); do mv "$f" 2_mNA/; done
+for f in $( ls *wNA.* ); do mv "$f" 3_wNA/; done
+
+for f in $( ls *eCA.* ); do mv "$f" 1_eCA/; done
+for f in $( ls *wCA.* ); do mv "$f" 2_wCA/; done
+
+for f in $( ls *eSA.* ); do mv "$f" 1_eSA/; done
+for f in $( ls *sSA.* ); do mv "$f" 2_sSA/; done
+for f in $( ls *wSA.* ); do mv "$f" 3_wSA/; done
+
+for f in $( ls *IN.* ); do mv "$f" 1_IN/; done
+for f in $( ls *nEA.* ); do mv "$f" 2_nEA/; done
+for f in $( ls *sEA.* ); do mv "$f" 3_sEA/; done
+
+for f in $( ls *eAU.* ); do mv "$f" 1_eAU/; done
+for f in $( ls *wAU.* ); do mv "$f" 2_wAU/; done
+
+for f in $( ls *_cross_*.* ); do mv "$f" 1_cross/; done
+for f in $( ls *_nocross_*.* ); do mv "$f" 2_nocross/; done
+
+
+
+
+mv sfcWind_1_ERAI_MMA_1_Above_RelDiag_rawdata_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_1_Above_RelDiag_rawdata_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_1_Below_RelDiag_rawdata_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_1_Below_RelDiag_rawdata_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_1_Near_RelDiag_rawdata_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_1_Near_RelDiag_rawdata_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_2_Above_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_2_Below_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_2_Near_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_3_Above_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png sfcWind_2_ERAI_MAM_3_Above_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_3_Below_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png sfcWind_2_ERAI_MAM_3_Below_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_3_Near_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png sfcWind_2_ERAI_MAM_3_Near_RelDiag_sbc_nocross_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_4_Above_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_4_Above_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_4_Below_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_4_Below_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_4_Near_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_4_Near_RelDiag_cal_cross_sfcwind_1991_2012_wAU.png
+
+mv sfcWind_1_ERAI_MMA_5_Above_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_5_Below_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png
+mv sfcWind_1_ERAI_MMA_5_Near_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png sfcWind_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_sfcwind_1991_2012_wAU.png
+
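+# NOTE: the renames above fix the MMA -> MAM season typo file by file; bash pattern
+# substitution can do the same in one commented-out loop. Beware that the sbc_nocross group
+# above was also renamed from the sfcWind_1_ to the sfcWind_2_ prefix, which this sketch
+# deliberately does not reproduce:
+#for f in sfcWind_?_ERAI_MMA_*_wAU.png; do mv "$f" "${f/_MMA_/_MAM_}"; done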
+
+for f in $( ls *_MAM_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_JJA_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_SON_*.* ); do mv "$f" "3_$f"; done
+for f in $( ls *_DJF_*.* ); do mv "$f" "4_$f"; done
+for f in $( ls *_ERAI_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_JRA_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_ERAI2_*.* ); do mv "$f" "3_$f"; done
+for f in $( ls *_JRA2_*.* ); do mv "$f" "4_$f"; done
+for f in $( ls *_Above_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_Near_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_Below_*.* ); do mv "$f" "3_$f"; done
+
+
+
+for f in $( ls *_MAM_*.* ); do mv "$f" "1_tas_$f"; done
+for f in $( ls *_JJA_*.* ); do mv "$f" "2_tas_$f"; done
+for f in $( ls *_SON_*.* ); do mv "$f" "3_tas_$f"; done
+for f in $( ls *_DJF_*.* ); do mv "$f" "4_tas_$f"; done
+for f in $( ls *_ERAI_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_JRA_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_ERAI2_*.* ); do mv "$f" "3_$f"; done
+for f in $( ls *_JRA2_*.* ); do mv "$f" "4_$f"; done
+for f in $( ls *_Above_*.* ); do mv "$f" "1_$f"; done
+for f in $( ls *_Near_*.* ); do mv "$f" "2_$f"; done
+for f in $( ls *_Below_*.* ); do mv "$f" "3_$f"; done
+
+
+
+mv 1_tas_1_tas_1_tas_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_1_tas_2_tas_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_1_tas_3_tas_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_1_tas_4_tas_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_2_tas_1_tas_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_2_tas_2_tas_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_2_tas_3_tas_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_2_tas_4_tas_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_AF.png
+mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
+mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
+mv 1_tas_3_tas_3_tas_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
+mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
+mv 1_tas_4_tas_1_tas_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
+mv 1_tas_4_tas_2_tas_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
+mv 1_tas_4_tas_3_tas_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
+mv 1_tas_4_tas_4_tas_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_AF.png
3_tas_2_tas_2_tas_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_AF.png + + + + + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 
1_tas_4_tas_2_tas_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_1_tas_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 
3_tas_2_tas_2_tas_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_AF.png + + + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 
1_4_1_tas_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 
3_tas_2_tas_1_tas_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_2_tas_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_AF.png + + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_3_tas_4_tas_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 
1_3_4_tas_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_4_Near_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_1_3_tas_1_ERAI_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_1_tas_4_tas_1_ERAI_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 
3_1_4_tas_1_ERAI_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_1_tas_2_JRA_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_2_tas_2_JRA_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_3_tas_2_JRA_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_2_tas_4_tas_2_JRA_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_4_Below_RelDiag_cal_cross_tas_1991_2012_AF.png +mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_3_tas_3_ERAI2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_1_tas_4_JRA2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_2_tas_4_JRA2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_3_tas_4_JRA2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png +mv 3_tas_4_tas_4_tas_4_JRA2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_4_Below_RelDiag_cal_cross_tas_1981_2012_AF.png + +mv 1_tas_1_tas_1_tas_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_1_tas_1_ERAI_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_2_tas_1_ERAI_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_2_tas_1_ERAI_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_3_tas_1_ERAI_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_3_tas_1_ERAI_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_1_tas_4_tas_1_ERAI_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_1_4_tas_1_ERAI_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_1_tas_2_JRA_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_1_tas_2_JRA_MAM_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_2_tas_2_JRA_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_2_tas_2_JRA_JJA_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_3_tas_2_JRA_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_3_tas_2_JRA_SON_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_2_tas_4_tas_2_JRA_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png 1_2_4_tas_2_JRA_DJF_5_Above_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 1_tas_3_tas_1_tas_3_ERAI2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_1_tas_3_ERAI2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_2_tas_3_ERAI2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_2_tas_3_ERAI2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_3_tas_3_tas_3_ERAI2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_3_tas_3_ERAI2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 
1_tas_3_tas_4_tas_3_ERAI2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_3_4_tas_3_ERAI2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_1_tas_4_JRA2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_1_tas_4_JRA2_MAM_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_2_tas_4_JRA2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_2_tas_4_JRA2_JJA_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_3_tas_4_JRA2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_3_tas_4_JRA2_SON_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 1_tas_4_tas_4_tas_4_JRA2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png 1_4_4_tas_4_JRA2_DJF_5_Above_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_1_tas_1_tas_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_1_tas_1_ERAI_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_2_tas_1_ERAI_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_2_tas_1_ERAI_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_3_tas_1_ERAI_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_3_tas_1_ERAI_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_1_tas_4_tas_1_ERAI_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_1_4_tas_1_ERAI_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_1_tas_2_JRA_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_1_tas_2_JRA_MAM_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_2_tas_2_JRA_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_2_tas_2_JRA_JJA_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_3_tas_2_JRA_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_3_tas_2_JRA_SON_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_2_tas_4_tas_2_JRA_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png 2_2_4_tas_2_JRA_DJF_5_Near_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 2_tas_3_tas_1_tas_3_ERAI2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_1_tas_3_ERAI2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_2_tas_3_ERAI2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_2_tas_3_ERAI2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_3_tas_3_ERAI2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_3_tas_3_ERAI2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_3_tas_4_tas_3_ERAI2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_3_4_tas_3_ERAI2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_1_tas_4_JRA2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_1_tas_4_JRA2_MAM_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_2_tas_4_JRA2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_2_tas_4_JRA2_JJA_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_3_tas_4_JRA2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_3_tas_4_JRA2_SON_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 2_tas_4_tas_4_tas_4_JRA2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png 2_4_4_tas_4_JRA2_DJF_5_Near_RelDiag_cal_nocross_tas_1981_2012_AF.png +mv 3_tas_1_tas_1_tas_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_1_1_tas_1_ERAI_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_2_tas_1_ERAI_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_1_2_tas_1_ERAI_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png +mv 3_tas_1_tas_3_tas_1_ERAI_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 
3_1_3_tas_1_ERAI_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png
+mv 3_tas_1_tas_4_tas_1_ERAI_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_1_4_tas_1_ERAI_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png
+mv 3_tas_2_tas_1_tas_2_JRA_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_1_tas_2_JRA_MAM_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png
+mv 3_tas_2_tas_2_tas_2_JRA_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_2_tas_2_JRA_JJA_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png
+mv 3_tas_2_tas_3_tas_2_JRA_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_3_tas_2_JRA_SON_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png
+mv 3_tas_2_tas_4_tas_2_JRA_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png 3_2_4_tas_2_JRA_DJF_5_Below_RelDiag_cal_nocross_tas_1991_2012_AF.png
+mv 3_tas_3_tas_1_tas_3_ERAI2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_1_tas_3_ERAI2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+mv 3_tas_3_tas_2_tas_3_ERAI2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_2_tas_3_ERAI2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+mv 3_tas_3_tas_3_tas_3_ERAI2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_3_tas_3_ERAI2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+mv 3_tas_3_tas_4_tas_3_ERAI2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_3_4_tas_3_ERAI2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+mv 3_tas_4_tas_1_tas_4_JRA2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_1_tas_4_JRA2_MAM_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+mv 3_tas_4_tas_2_tas_4_JRA2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_2_tas_4_JRA2_JJA_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+mv 3_tas_4_tas_3_tas_4_JRA2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_3_tas_4_JRA2_SON_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+mv 3_tas_4_tas_4_tas_4_JRA2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png 3_4_4_tas_4_JRA2_DJF_5_Below_RelDiag_cal_nocross_tas_1981_2012_AF.png
+
+
+# Europe tas: insert the missing tas_ tag after the season index:
+
+mv 1_1_1_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_1_2_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_1_3_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_1_4_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_2_1_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_2_2_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_2_3_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_2_4_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_1_Above_RelDiag_rawdata_tas_1991_2012_EU.png
+mv 1_3_1_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png
+mv 1_3_2_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png
+mv 1_3_3_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png
+mv 
1_3_4_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_1_Above_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_1_Near_RelDiag_rawdata_tas_1991_2012_EU.png +mv 2_3_1_3_ERAI2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_1_tas_4_JRA2_MAM_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png 2_4_4_tas_4_JRA2_DJF_1_Near_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_1_4_1_ERAI_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 
3_2_3_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_1_Below_RelDiag_rawdata_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_3_3_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_2_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_1_Below_RelDiag_rawdata_tas_1981_2012_EU.png + + + + + +mv 1_1_1_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_3_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_2_Above_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_3_2_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_2_Above_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 
2_1_1_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_2_Near_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 2_3_1_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_4_1_tas_4_JRA2_MAM_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png 2_4_4_tas_4_JRA2_DJF_2_Near_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_1_4_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_3_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_2_Below_RelDiag_sbc_cross_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 
3_3_3_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_2_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_2_Below_RelDiag_sbc_cross_tas_1981_2012_EU.png + + + +mv 1_1_1_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_2_3_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_3_Above_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_3_2_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_3_Above_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 
2_1_4_tas_1_ERAI_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_2_4_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_3_Near_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 2_3_1_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_1_tas_3_ERAI2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_3_2_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_2_tas_3_ERAI2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_3_3_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_3_tas_3_ERAI2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_3_4_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_3_4_tas_3_ERAI2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_1_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_4_1_tas_4_JRA2_MAM_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_2_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_4_2_tas_4_JRA2_JJA_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_3_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_4_3_tas_4_JRA2_SON_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 2_4_4_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png 2_4_4_tas_4_JRA2_DJF_3_Near_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_1_1_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_1_tas_1_ERAI_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_1_2_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_2_tas_1_ERAI_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_1_3_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_3_tas_1_ERAI_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_1_4_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_1_4_tas_1_ERAI_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_1_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_1_tas_2_JRA_MAM_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_2_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_2_tas_2_JRA_JJA_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_3_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_3_tas_2_JRA_SON_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_2_4_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png 3_2_4_tas_2_JRA_DJF_3_Below_RelDiag_sbc_nocross_tas_1991_2012_EU.png +mv 3_3_1_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_1_tas_3_ERAI2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_3_2_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_2_tas_3_ERAI2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_3_3_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_3_tas_3_ERAI2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_3_4_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_3_4_tas_3_ERAI2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_4_1_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_1_tas_4_JRA2_MAM_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 
3_4_2_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_2_tas_4_JRA2_JJA_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_4_3_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_3_tas_4_JRA2_SON_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png +mv 3_4_4_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png 3_4_4_tas_4_JRA2_DJF_3_Below_RelDiag_sbc_nocross_tas_1981_2012_EU.png + + +mv 1_1_1_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_1_tas_1_ERAI_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_1_2_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_2_tas_1_ERAI_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_1_3_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_3_tas_1_ERAI_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_1_4_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_1_4_tas_1_ERAI_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_1_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_1_tas_2_JRA_MAM_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_2_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_2_tas_2_JRA_JJA_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_3_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_3_tas_2_JRA_SON_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_2_4_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png 1_2_4_tas_2_JRA_DJF_4_Above_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 1_3_1_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_1_tas_3_ERAI2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_3_2_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_2_tas_3_ERAI2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_3_3_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_3_tas_3_ERAI2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_3_4_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_3_4_tas_3_ERAI2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_1_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_1_tas_4_JRA2_MAM_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_2_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_2_tas_4_JRA2_JJA_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_3_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_3_tas_4_JRA2_SON_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 1_4_4_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png 1_4_4_tas_4_JRA2_DJF_4_Above_RelDiag_cal_cross_tas_1981_2012_EU.png +mv 2_1_1_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_1_tas_1_ERAI_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_1_2_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_2_tas_1_ERAI_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_1_3_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_3_tas_1_ERAI_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_1_4_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_1_4_tas_1_ERAI_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_2_1_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_1_tas_2_JRA_MAM_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_2_2_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_2_tas_2_JRA_JJA_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 2_2_3_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_3_tas_2_JRA_SON_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png +mv 
2_2_4_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png 2_2_4_tas_2_JRA_DJF_4_Near_RelDiag_cal_cross_tas_1991_2012_EU.png
+
+# All remaining renames follow the same pattern: insert "tas_" after the third index
+# field (tercile_row_column). Instead of listing every file, rename the remaining
+# cal_cross (4) and cal_nocross (5) reliability diagrams with a loop; the glob only
+# matches files whose fourth field is still a digit, i.e. files not yet renamed:
+for f in [123]_[1234]_[1234]_[1234]_*_RelDiag_cal_*cross_tas_19[89]1_2012_EU.png
+do
+  mv "$f" "${f:0:6}tas_${f:6}"
+done
+
+
+######################################################################
+# Reliability diagram composition                                    #
+######################################################################
+
+var=tas #sfcWind tas
+
+for region in seEU #nEU swEU seEU #EU #AF #eNA mNA wNA # NA #eCA wCA #eSA sSA wSA # SA #IN nEA sEA # EA #eAU wAU #AU #GL #NH #SH #TR
+do
+
+for bias in sbc # raw sbc cal
+do
+
+case "$var" in
+"sfcWind")
+  var2=sfcwind
+  var_title="10m wind speed" ;;
+"tas")
+  var2=tas
+  var_title="2m temperature";;
+esac
+
+case "$bias" in
+"raw")
bias_suffix=rawdata + bias_text=none + bias_num=1 ;; +"sbc") + bias_suffix=sbc_cross + bias_text='simple_bias_correction_with_cross_validation' + bias_num=2 ;; +"cal") + bias_suffix=cal_cross + bias_text='calibration_with_cross_validation' + bias_num=4 ;; +esac + +case "$region" in +"nEU") + region_name='northern_Europe' + domain='[15W-45E,45N-75N]';; +"swEU") + region_name='southwestern_Europe' + domain='[15W-20E,35N-45N]';; +"seEU") + region_name='southeastern_Europe' + domain='[20W-45E,45N-75N]';; +"AF") + region_name='Africa' + domain='[20W-55E,35S-40N]';; +"eNA") + region_name='eastern_North_America' + domain='[86W-60W,30N-50N]';; +"mNA") + region_name='middle_North_America' + domain='[110W-85W,30N-50N]';; +"wNA") + region_name='western_North_America' + domain='[130W-110W,30N-50N]';; +"NA") + region_name='North_America' + domain='[170W-50W,10N-75N]';; +"eCA") + region_name='eastern_Central_America' + domain='[80W-60W,12.5N-30N]';; +"wCA") + region_name='western_Central_America' + domain='[110W-80W,10N-30N]';; +"eSA") + region_name='eastern_South_America' + domain='[45W-30W,25S-0S]';; +"sSA") + region_name='southern_South_America' + domain='[65W-45W,45S-25S]';; +"wSA") + region_name='western_South_America' + domain='[80W-65W,35S-0S]';; +"SA") + region_name='South_America' + domain='[90W-30W,60S-10N]';; +"IN") + region_name='India' + domain='[65E-85E,5N-35N]';; +"nEA") + region_name='northern_East_Asia' + domain='[95E-145E,35N-50N]';; +"sEA") + region_name='southern_East_Asia' + domain='[95E-145E,20N-35N]';; +"EA") + region_name='East_Asia' + domain='[90E-150E,20N-50N]';; +"eAU") + region_name='eastern_Australia' + domain='[135E-180E,50S-25S]';; +"wAU") + region_name='western_Australia' + domain='[110E-135E,40S-20S]';; +"AU") + region_name='Australia' + domain='[110E-180E,50S-0S]';; +"GL") + region_name='Globe' + domain='[180W-180E,90S-90N]';; +"NH") + region_name='Northern_Hemisphere' + domain='[180W-180E,20N-90N]';; +"SH") + region_name='Southern_Hemisphere' + domain='[180W-180E,90S-20S]';; +"TR") + region_name='Tropics' + domain='[180W-180E,20S-20N]';; +esac + + +for rean in ERAI JRA ERAI2 JRA2 +do + +case "$rean" in +"ERAI") + rean_name=ERA-Interim + rean_num=1 + year=1991 ;; +"JRA") + rean_name=JRA-55 + rean_num=2 + year=1991 ;; +"ERAI2") + rean_name=ERA-Interim + rean_num=3 + year=1981 ;; +"JRA2") + rean_name=JRA-55 + rean_num=4 + year=1981;; +esac + +# only cut title and caption from single figures: +~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_1_${var}_${rean_num}_${rean}_MAM_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash2.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_2_${var}_${rean_num}_${rean}_JJA_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash3.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_3_${var}_${rean_num}_${rean}_SON_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash4.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 1_${rean_num}_4_${var}_${rean_num}_${rean}_DJF_${bias_num}_Above_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash5.png +height_figure=$(identify -ping -format %h trash2.png) + +#convert -background white -size ${height_figure}x -pointsize 50 -gravity center label:"Above Normal" trash1.png +convert -background white -size 500x1800 -pointsize 150 -gravity center label:"Above \nNormal" trash1.png + +montage trash1.png trash2.png trash3.png trash4.png trash5.png -tile 5x1 -geometry +0+0 trash6.png + +# remove a 
bug that makes the new text repeat twice: +~/scripts/fig2catalog.sh -l -m 30 trash6.png trash6.png + +# repeat with next tercile: + +# only cut title and caption from single figures: +~/scripts/fig2catalog.sh -l -m 300 -r 160 2_${rean_num}_1_${var}_${rean_num}_${rean}_MAM_${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash8.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 2_${rean_num}_2_${var}_${rean_num}_${rean}_JJA_${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash9.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 2_${rean_num}_3_${var}_${rean_num}_${rean}_SON_${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash10.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 2_${rean_num}_4_${var}_${rean_num}_${rean}_DJF_${bias_num}_Near_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash11.png + +convert -background white -size 500x1800 -pointsize 150 -gravity center label:"Near \nNormal" trash7.png +montage trash7.png trash8.png trash9.png trash10.png trash11.png -tile 5x1 -geometry +0+0 trash12.png +~/scripts/fig2catalog.sh -l -m 30 trash12.png trash12.png + +# repeat with next tercile: + +# only cut title and caption from single figures: +~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_1_${var}_${rean_num}_${rean}_MAM_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash14.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_2_${var}_${rean_num}_${rean}_JJA_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash15.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_3_${var}_${rean_num}_${rean}_SON_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash16.png +~/scripts/fig2catalog.sh -l -m 300 -r 160 3_${rean_num}_4_${var}_${rean_num}_${rean}_DJF_${bias_num}_Below_RelDiag_${bias_suffix}_${var2}_${year}_2012_${region}.png trash17.png + +convert -background white -size 500x1800 -pointsize 150 -gravity center label:"Below \nNormal" trash13.png +montage trash13.png trash14.png trash15.png trash16.png trash17.png -tile 5x1 -geometry +0+0 trash18.png +~/scripts/fig2catalog.sh -l -m 30 trash18.png trash18.png + +montage trash6.png trash12.png trash18.png -tile 1x3 -geometry +0+0 trash19.png + +width_figure=$(identify -ping -format %w trash19.png) +convert -background white -size 400x200 -pointsize 150 -gravity center label:" " trash20.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"MAM" trash21.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"JJA" trash22.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"SON" trash23.png +convert -background white -size 2380x200 -pointsize 150 -gravity center label:"DJF" trash24.png + +montage trash20.png trash21.png trash22.png trash23.png trash24.png -tile 5x1 -geometry +0+0 trash25.png +~/scripts/fig2catalog.sh -l -m 30 trash25.png trash25.png + +montage trash25.png trash19.png -tile 1x2 -geometry +0+0 trash26.png + +~/scripts/fig2catalog.sh -s 0.9 -p 100 -x 50 -t 'Multiple Systems / '${var}' / Reliability Diagram \nAll seasons / '${year}'-2012' -c 'Start dates: 1st February (MAM), 1st May (JJA), 1st August (SON) or 1st November (DJF)\nReference dataset: '${rean_name}'\nBias correction: '${bias_text}' \nRegion: '${region_name}' '${domain}'\nMask: sea depth below 50m' trash26.png composition_${var}_${rean}_${bias_suffix}_${year}_${region}.png + +rm trash*.* + +done 
+done +done + + + + + + + + + + +# Fix titles of temperature rel.diagrams which was still with 10m wind speed instead of temperature: + +var='tas' + +for region in GL NH SH TR EU AF NA SA EA AU +do + +for bias in raw sbc cal +do + +case "$var" in +"sfcWind") + var2=sfcwind + var_title1="10m" + var_title2="wind speed" ;; + +"tas") + var2=tas + var_title1="2m" + var_title2="temperature";; + +esac + +case "$bias" in +"raw") + bias_suffix=rawdata + bias_text=none + bias_num=1 ;; +"sbc") + bias_suffix=sbc_cross + bias_text='simple_bias_correction_with_cross_validation' + bias_num=2 ;; +"cal") + bias_suffix=cal_cross + bias_text='calibration_with_cross_validation' + bias_num=4 ;; +esac + +case "$region" in +"nEU") + region_name='northern_Europe' + domain='[15W-45E,45N-75N]';; +"swEU") + region_name='southwestern_Europe' + domain='[15W-20E,35N-45N]';; +"seEU") + region_name='southeastern_Europe' + domain='[20W-45E,45N-75N]';; +"AF") + region_name='Africa' + domain='[20W-55E,35S-40N]';; +"eNA") + region_name='eastern_North_America' + domain='[86W-60W,30N-50N]';; +"mNA") + region_name='middle_North_America' + domain='[110W-85W,30N-50N]';; +"wNA") + region_name='western_North_America' + domain='[130W-110W,30N-50N]';; +"NA") + region_name='North_America' + domain='[170W-50W,10N-75N]';; +"eCA") + region_name='eastern_Central_America' + domain='[80W-60W,12.5N-30N]';; +"wCA") + region_name='western_Central_America' + domain='[110W-80W,10N-30N]';; +"eSA") + region_name='eastern_South_America' + domain='[45W-30W,25S-0S]';; +"sSA") + region_name='southern_South_America' + domain='[65W-45W,45S-25S]';; +"wSA") + region_name='western_South_America' + domain='[80W-65W,35S-0S]';; +"SA") + region_name='South_America' + domain='[90W-30W,60S-10N]';; +"IN") + region_name='India' + domain='[65E-85E,5N-35N]';; +"nEA") + region_name='northern_East_Asia' + domain='[95E-145E,35N-50N]';; +"sEA") + region_name='southern_East_Asia' + domain='[95E-145E,20N-35N]';; +"EA") + region_name='East_Asia' + domain='[90E-150E,20N-50N]';; +"eAU") + region_name='eastern_Australia' + domain='[135E-180E,50S-25S]';; +"wAU") + region_name='western_Australia' + domain='[110E-135E,40S-20S]';; +"AU") + region_name='Australia' + domain='[110E-180E,50S-0S]';; +"GL") + region_name='Globe' + domain='[180W-180E,90S-90N]';; +"NH") + region_name='Northern_Hemisphere' + domain='[180W-180E,20N-90N]';; +"SH") + region_name='Southern_Hemisphere' + domain='[180W-180E,90S-20S]';; +"TR") + region_name='Tropics' + domain='[180W-180E,20S-20N]';; +esac + +for rean in ERAI JRA ERAI2 JRA2 +do + +case "$rean" in +"ERAI") + rean_name=ERA-Interim + rean_num=1 + year=1991 ;; +"JRA") + rean_name=JRA-55 + rean_num=2 + year=1991 ;; +"ERAI2") + rean_name=ERA-Interim + rean_num=3 + year=1981 ;; +"JRA2") + rean_name=JRA-55 + rean_num=4 + year=1981;; +esac + +~/scripts/fig2catalog.sh -s 0.9 -r 450 -m 760 -t 'Multiple Systems / '${var_title1}' '${var_title2}' / Reliability Diagram \nAll seasons / '${year}'-2012' -c 'Start dates: 1st February (MAM), 1st May (JJA), 1st August (SON) or 1st November (DJF)\nReference dataset: '${rean_name}'\nBias correction: '${bias_text}' \nRegion: '${region_name}' '${domain}'\nMask: sea depth below 50m' composition_${var}_${rean}_${bias_suffix}_${year}_${region}.png composition_${var}_${rean}_${bias_suffix}_${year}_${region}_fixed.png + +done +done +done + + + + + + diff --git a/weather_regimes_v43.R b/weather_regimes_v43.R new file mode 100644 index 0000000000000000000000000000000000000000..e3f080539a40a9e29abbd418f6e862cb160049f3 --- 
/dev/null +++ b/weather_regimes_v43.R @@ -0,0 +1,1344 @@ + +# Creation: 6/2016 +# Author: Nicola Cortesi +# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system +# +# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder. +# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_maps.R +# +# Assumption: its output are needed by the scripts weather_regimes_impact.R and weather_regimes.R +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(abind) +library(reshape2) # after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken! + +SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine +if(SMP){ + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated +} else { + source('/home/Earth/ncortesi/scripts/Rfunctions.R') + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated +} + +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +f6h <- TRUE # set if suffix "_f6h" is present in the file name or not (set it always to TRUE except if rean = JRA55 and psl = "z500") + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!!!!! 
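+
+# For reference, Load() fills the $...$ placeholders in these path templates at run
+# time. A minimal sketch of the expansion (illustration only; Load() does this
+# internally, and these gsub() calls are not part of this script's pipeline):
+#
+# template <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc'
+# path <- gsub('\\$STORE_FREQ\\$', 'daily', template)
+# path <- gsub('\\$VAR_NAME\\$', 'psl', path)
+# path <- gsub('\\$YEAR\\$', '1981', path)
+# path <- gsub('\\$MONTH\\$', '01', path)
+# # path is now '/esnas/recon/jma/jra55/daily_mean/psl_f6h/psl_198101.nc'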
+ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +NCEP.name <- "NCEP" + +rean <- JRA55 #JRA55 #ERAint #NCEP # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +# choose a forecast system: +forecast <- ECMWF_S4 + +# put 'rean' to load pressure fields and var from reanalisis, or put 'forecast' to load them from forecast systems: +fields <- rean #forecast + +psl <- "psl" # "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2017 #2017 #2015 + +year.end.clim <- year.end - 1 # last year for computation of climatology (unless year.end has data available for all months, it's better to set it to the previous year) + +LOESS <- TRUE # To apply the LOESS filter or not to the climatology +running.cluster <- FALSE # add to the clustering also the daily SLP data of the two closer months to the month to use (only for monthly analysis) +running.15 <- FALSE # add to the clustering also the daily SLP data of the 15 days of the two closer months (only for monthly analysis). You cannot set to TRUE both + # this variable and 'running.cluster' above +lat.weighting <- TRUE # set it to true to weight psl data on latitude before applying the cluster analysis +subdaily <- FALSE # id TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalysis with 6-hourly data avail.) + +sequences <-FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! 
+ +# Only for reanalysis: +WR.period <- 2 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcasts files in the forecast psl data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the seasonal psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name))) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 +n.years.clim <- year.end.clim - year.start + 1 + +if(running.cluster && running.15) stop("both 'running.cluster' and 'running.15' cannot be TRUE simultaneously") + +if(!f6h) { pos <- regexpr("_f6h", fields); my.string <- strsplit(fields, "_f6h") ; fields <- paste0(my.string[[1]][1],my.string[[1]][2]) } # remove _f6h from filename + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n")
+# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify the North Atlantic area:
+if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1)
+
+### Real test:
+### lat <-
+### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1)
+
+### if (fields.name="pippo"){
+
+# in the case of S4, we also interpolate it to the same resolution as the reanalysis:
+# (notice that if the S4 grid is the same as the chosen grid (typically ERA-Interim's), Load() leaves the S4 grid unchanged)
+if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1)
+
+#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100)
+
+if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat')
+
+n.lon <- length(domain$lon)
+n.lat <- length(domain$lat)
+
+pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude
+pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain above the minimum latitude
+pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1)
+pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1]
+
+pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min}
+pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only considers the case where the study area crosses the Greenwich meridian!
+n.pos.lat <- length(pos.lat)
+n.pos.lon <- length(pos.lon)
+
+lat <- domain$lat[pos.lat] # lat values of the chosen area only (it is smaller than the whole spatial domain loaded)
+#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order
+lon <- domain$lon[pos.lon] # lon values of the chosen area only
+
+#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector)
+#lat.max.area <- domain$lat[pos.lat.max]
+#lon.min.area <- domain$lon[pos.lon.min]
+#lon.max.area <- domain$lon[pos.lon.max]
+
+#rm(domain)
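+
+# Worked example of the Greenwich-crossing index logic above (illustration only,
+# with made-up grid values): on a 0.75-degree grid, lon runs 0, 0.75, ..., 359.25.
+# With lon.min = 274.5 and lon.max = 45, pos.lon.max points at 45E and pos.lon.min
+# at 274.5E (i.e. 85.5W), so the selected longitudes wrap around 0E:
+#
+# lon.grid <- seq(0, 359.25, 0.75)
+# p.max <- tail(which(45 - lon.grid >= 0), 1)      # index of 45E
+# p.min <- which(lon.grid - 274.5 >= 0)[1]         # index of 274.5E
+# sel <- c(1:p.max, p.min:length(lon.grid))        # 0E..45E plus 274.5E..359.25E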
+
+#################### Start analysis ####################
+
+# Load psl data (interpolating it to the same reanalysis grid, if necessary):
+cat("Loading data. Please wait...\n")
+
+if(fields.name == rean.name && subdaily == FALSE){ # Load daily psl data of all years in the reanalysis case:
+  psleuFull <- array(NA,c(1, 1, n.years, 365, n.pos.lat, n.pos.lon))
+
+  psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8)
+
+  # remove leap days to have all arrays with the same dimensions:
+  cat("Removing leap days. Please wait...\n")
+  psleuFull[,,,,,] <- psleuFull366$obs[,,,1:365,,]
+
+  for(y in year.start:year.end){
+    y2 <- y - year.start + 1
+    if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366$obs[,,y2,61:366,,] # take the March-to-December period removing the 29th of February
+  }
+
+  psleuFull366$obs <- NULL
+  gc()
+
+  if((running.cluster || running.15) && fields == rean) {
+    ## the months after the last month of the last year loaded are automatically filled with NA by Load(); the clustering algorithm doesn't accept NA, so
+    ## we have to replace them with some values (in this case the mean of the previous years, so as not to affect the analysis too much)
+    ## if we want to be able to compute the running clustering.
+    ## WARNING: in case your last month is December, the running clustering will use the January data of the last year loaded, not of the next year:
+    #psleuFull[,,n.years,305:334,,] <- psleuFull[,,n.years-1,305:334,,]
+    month.NA <- psleuFull366$not_found_files[1] # filename of the file with the first month with NA (string value with full file path)
+    first.month.NA <- as.integer(substr(month.NA,nchar(month.NA)-4,nchar(month.NA)-3)) # first month with NA
+    first.day.NA <- pos.period(1,first.month.NA)[1] # first day with NA (counting from the start of the year)
+
+    psleuFullnolast <- psleuFull[,,1:(n.years-1),first.day.NA:365,,]
+    psleuFullMean <- apply(psleuFullnolast, c(2,3,4), mean, na.rm=TRUE) # mean of the previous years
+
+    psleuFull[,,n.years,first.day.NA:365,,] <- psleuFullMean
+
+    rm(psleuFullnolast, psleuFullMean)
+  }
+
+  # convert psl in daily anomalies with the LOESS filter:
+  cat("Calculating anomalies. 
Please wait...\n") + psleuFull_no_last_year <- psleuFull[1,1,1:n.years.clim,,,,drop=FALSE] + pslPeriodClim <- apply(psleuFull_no_last_year, c(1,2,4,5,6), mean, na.rm=T) + rm(psleuFull_no_last_year) + gc() + + if(LOESS == TRUE){ + pslPeriodClimLoess <- array(NA, dim(pslPeriodClim)) + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean and subdaily == FALSE){ + + +# Load 6-hourly psl data of all years in the reanalysis case: +if(fields.name == rean.name && subdaily == TRUE){ + + #sdates <- as.vector(sapply(year.start:year.end, function(x) paste0(x, sprintf("%02d", 1:12), '01'))) + #my.exp <- list(path=fields) + #psleuFull366 <- Load(var = psl, exp = list(my.exp), NULL, sdates, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs + #dim(data$obs) <- c(dim(data$obs)[1:2], 1, dim(data$obs)[3]*dim(data$obs)[4], dim(data$obs)[5:6]) + + my.exp <- list(path=fields) + + # Load January data: + psleu1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + + # Load february data (it automatically discards the 29th of February): + psleu2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + + # Load March data: + psleu3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0301'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0401'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu5 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0501'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu6 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0601'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu7 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0701'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu8 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0801'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = 
lon.min, lonmax = lon.max)$mod + psleu9 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0901'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu10 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1001'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu11 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu12 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + + if(running.cluster == TRUE && rean.name == "ERA-interim") { + # in this case, last month of last year might not have in /esnas the data for the 3rd month of the running cluster. Load() set these values to NA, + # but the clustering analysis is not able to process NA, so we have to remove the NA for that month and replace with the values of the previous year: + psleu11[,,n.years,,,] <- psleu11[,,n.years-1,,,] + psleu12[,,n.years,,,] <- psleu12[,,n.years-1,,,] + + } + + psleuFull <- abind(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12, along=4) + rm(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12) + gc() + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + # separate psl data in the four hours of the day ( 0.00, 6.00, 12.00, 18.00) + pslPeriodClim1 <- pslPeriodClim[1,1,seq(1,1460,4),,] + pslPeriodClim2 <- pslPeriodClim[1,1,seq(2,1460,4),,] + pslPeriodClim3 <- pslPeriodClim[1,1,seq(3,1460,4),,] + pslPeriodClim4 <- pslPeriodClim[1,1,seq(4,1460,4),,] + + rm(pslPeriodClim) + gc() + + pslPeriodClimLoess1 <- array(NA, dim(pslPeriodClim1)) + pslPeriodClimLoess2 <- array(NA, dim(pslPeriodClim2)) + pslPeriodClimLoess3 <- array(NA, dim(pslPeriodClim3)) + pslPeriodClimLoess4 <- array(NA, dim(pslPeriodClim4)) + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[,i,j], hourly=1:(1460/4)) + my.loess1 <- loess(ens.mean ~ hourly, my.data1, span=0.35) + pslPeriodClimLoess1[,i,j] <- predict(my.loess1) + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[,i,j], hourly=1:(1460/4)) + my.loess2 <- loess(ens.mean ~ hourly, my.data2, span=0.35) + pslPeriodClimLoess2[,i,j] <- predict(my.loess2) + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[,i,j], hourly=1:(1460/4)) + my.loess3 <- loess(ens.mean ~ hourly, my.data3, span=0.35) + pslPeriodClimLoess3[,i,j] <- predict(my.loess3) + + my.data4 <- data.frame(ens.mean=pslPeriodClim4[,i,j], hourly=1:(1460/4)) + my.loess4 <- loess(ens.mean ~ hourly, my.data4, span=0.35) + pslPeriodClimLoess4[,i,j] <- predict(my.loess4) + + } + } + + rm(my.data1, my.data2, my.data3, my.data4, my.loess1, my.loess2, my.loess3, my.loess4) + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClim4) + gc() + + s1 <- seq(1,1460,4) + s2 <- seq(2,1460,4) + s3 <- seq(3,1460,4) + s4 <- seq(4,1460,4) + + pslPeriodClimLoess <- 
array(NA,c(365*4,dim(pslPeriodClimLoess1)[2:3])) + + for(day in 1:365){ + pslPeriodClimLoess[s1[day],,] <- pslPeriodClimLoess1[day,,] + pslPeriodClimLoess[s2[day],,] <- pslPeriodClimLoess2[day,,] + pslPeriodClimLoess[s3[day],,] <- pslPeriodClimLoess3[day,,] + pslPeriodClimLoess[s4[day],,] <- pslPeriodClimLoess4[day,,] + } + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 1, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull[1,1,,,,] - pslPeriodClim2 + + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean & subdaily == TRUE + + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at time and all the lead times (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE) { + + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all them in the third dimension of the array, one month at time: + psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = 
psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + + if(running.cluster == TRUE) { + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess1 <- pslPeriodClim1 + pslPeriodClimLoess2 <- pslPeriodClim2 + pslPeriodClimLoess3 <- pslPeriodClim3 + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes) + my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35) + pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1) + rm(my.data1, my.loess1) + gc() + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes) + my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35) + pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2) + rm(my.data2, my.loess2) + gc() + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes) + my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35) + pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3) + rm(my.data3, my.loess3) + gc() + } + } + + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3) + gc() + + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } # close if on running cluster + + } else { #in case of no LOESS: + + if(running.cluster == TRUE) { + # in this case, the 
climatology is measured FOR EACH MONTH INDEPENDENTLY, instead of using a seasonal value:
+      pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T)
+      pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T)
+
+      pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3))
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+
+      psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3))
+      rm(psleuFull1, psleuFull2, psleuFull3)
+      gc()
+
+    } else { # in case of no running cluster:
+
+      pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) # the climatology must be computed here too, as in the LOESS case above, or it would be undefined in this branch
+
+      pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members)
+      rm(pslPeriodClim)
+      gc()
+    }
+  }
+
+  ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216)
+  ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35)
+  ## s4.pred <- predict(s4.loess)
+
+  psleuFull <- psleuFull - pslPeriodClimDos
+  rm(pslPeriodClimDos)
+  gc()
+
+} # close if on fields.name == forecast.name
+
+if(fields.name == ECMWF_monthly.name){
+  # Load 6-hourly psl data in the forecast case:
+  n.leadtimes <- length(leadtime)
+  sdates <- weekly.seq(forecast.year,mes,day)
+  n.sdates <- length(sdates)
+
+  # (the array can only be created after n.sdates and n.leadtimes are defined above)
+  psleuFull <- array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ...
+
+  for (startdate in 1:n.sdates){
+    for(lday in leadtime){
+      var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)
+
+      mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T)
+      rm(var)
+      gc()
+
+      psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday
+      rm(mean.lday)
+      gc()
+    }
+  }
+}
+
+if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m)
+if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa
+gc()
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R")
+
+# compute the cluster analysis:
+my.PCA <- list()
+my.cluster <- list()
+my.cluster.array <- list()
+tot.variance <- list()
+n.pcs <- my.cluster <- c()
+
+#WR.period=1 # for the debug
+for(p in WR.period){
+  # Select only the data of the month/season we want:
+  if(fields.name == rean.name){
+
+    #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter)
+    if(running.cluster) {
+      if(subdaily){
+        my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4))
+        if(p == 1) my.hours <- c(1337:1460,1:236)
+        if(p == 12) my.hours <- c(1217:1460,1:124)
+
+        pslPeriod <- psleuFull[,my.hours,,] # select all days in the period of 3 months centered on the target month p
+
+      } else {
+        pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all days in the period of 3 months centered on the target month p
+      }
+    }
+
+    if(running.15) pslPeriod <- psleuFull[1,1,,pos.month.extended15(1,p),,]
+
+    ## if there isn't any kind of running clustering:
+    if(!running.cluster && !running.15) 
{ + + if(subdaily){ + my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4)) + pslPeriod <- psleuFull[,my.hours,,] + } else { + pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only days in the chosen period (i.e: winter) + } + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + if(!running.cluster && !running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.period(2001,p))), 1, n.years) + if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + if(running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended15(2001,p))), 1, n.years) + + pslPeriod <- pslPeriod * lat.weighted.array + + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Preformatting data for clustering. Please wait......\n") + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + if(running.cluster == TRUE && p < 13) { + # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-months running period + # generalizing the command below (which at present only work for the month of January an lead time 0): + + # Select DJF (lead time 0) for our case study, excluding 29 of february): + pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE] + + } else { + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # remove the 29th of February to have the same n. of elements for all years + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) 
+      # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+
+      if(y == years[1]) {
+        pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+      } else {
+        pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+      }
+    }
+
+  }
+
+  # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+  if(lat.weighting){
+    lat.weighted <- cos(lat*pi/180)^0.5
+    lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+    pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!!!!!!!!!
+    rm(lat.weighted.array)
+    gc()
+  }
+
+  # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ... , year1day31, year2day1, year2day2, ... , year2day28
+  gc()
+  cat("Preformatting data. Please wait...\n")
+  psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+  gc()
+
+  #save(psl.melted,file=paste0(workdir,"/psl_melted.RData"))
+
+  cat("Preformatting data. Please wait......\n")
+  # This function eats a lot of memory!!!
+  psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+  #gc()
+
+  #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData"))
+  #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+  #save(my.cluster,file=paste0(workdir,"/my_cluster.RData"))
+
+  #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+  #n.rows <- nrow(psl.melted)
+  #a <- psl.melted$Member
+  #b <- psl.melted$StartYear
+  #c <- psl.melted$LeadDay
+  #d <- psl.melted$Lat
+  #e <- psl.melted$Lon
+  #f <- psl.melted$value
+
+  # this way of converting a data.frame into an array is much less memory-hungry than acast, but a bit slower:
+  #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+  gc()
+  #rm(pslPeriod); gc()
+}
+
+if(fields.name == ECMWF_monthly.name){
+  my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+  days.period <- list() # get which days inside psleuFull belong to the chosen period
+  for(startdate in my.startdates.period){
+    days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+  }
+
+  # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+  # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+}
+
+# compute the clustering:
+cat("Clustering data. Please wait...\n")
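+
+# A toy illustration of the clustering step below (standalone sketch, not used by
+# the script): each row of psl.kmeans is one day (all grid points concatenated),
+# and kmeans() groups the days into 4 weather regimes; $cluster gives the regime of
+# each day and $centers the regime patterns:
+#
+# toy <- matrix(rnorm(100*6), nrow=100, ncol=6)   # 100 fake days, 6 grid points
+# toy.cluster <- kmeans(toy, centers=4, iter.max=100, nstart=30)
+# table(toy.cluster$cluster)                      # how many days fall in each regime
+# dim(toy.cluster$centers)                        # 4 x 6: one pattern per regime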
+if(PCA){
+  my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # select only 1 grid point every 16, to reduce the memory load
+  pslcut <- psl.kmeans[,my.seq]
+
+  my.PCA <- princomp(pslcut,cor=FALSE)
+  tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep, based on the sum of their explained variance
+  n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep only the PCs that together explain at least the chosen fraction of variance (80% by default)
+  my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+  rm(pslcut)
+} else { # 4 is the number of clusters:
+
+  my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+  gc()
+
+  #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+  #if(fields.name == rean.name){
+  #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+  # save the time series output of the cluster analysis:
+  #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+  #}
+
+  if(fields.name == ECMWF_S4.name) {
+    my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+    #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+  }
+}
+
+rm(psl.kmeans)
+gc()
+
+#} # close for on 'p'
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if(running.cluster && p < 13){
+      # select only the days inside the 3-months cluster series that belong only to the target month p:
+      n.days.period <- length(pos.month.extended(1,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(1,p) == pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      # in this case, we are not selecting days but 6-hourly intervals:
+      if(subdaily) days.month.full <- sort(c(days.month.full*4, days.month.full*4+1, days.month.full*4+2, days.month.full*4+3))
+
+      # select only the days of the cluster series inside the target month p:
+      cluster.sequence <- my.cluster$cluster[days.month.full]
+    }
+
+    if(running.15 && p < 13){
+      # select only the days inside the running-15 cluster series that belong only to the target month p:
+      n.days.period <- length(pos.month.extended15(1,p)) # total number of days in the extended period
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended15(1,p) == 
pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p)) + + for(y in 1:n.years){ + days.month.new <- days.month + n.days.period*(y-1) + days.month.full <- c(days.month.full, days.month.new) + #print(days.month.new) + #print(days.month) + } + + # select only the days of the cluster series inside the target month p: + cluster.sequence <- my.cluster$cluster[days.month.full] + + } + + if(!running.cluster && !running.15) cluster.sequence <- my.cluster$cluster + + # convert cluster sequence from 6-hourly to daily: + if(subdaily){ + type <- c() + + for(day in 1:(length(cluster.sequence)/4)){ + #day <- ceiling(hour/4) + hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)] + + t1 <- length(which(hourly == 1)) + t2 <- length(which(hourly == 2)) + t3 <- length(which(hourly == 3)) + t4 <- length(which(hourly == 4)) + tt <- c(t1,t2,t3,t4) + + # if all the 4 time steps belong to the same regime, assign it to this day: + if(length(unique(hourly)) == 1) type[day] <- hourly[1] + + # if there are two different regimes, check if one has a higher frequency: + if(length(unique(hourly)) == 2){ + if(any(tt == 3)){ # if 3 of the 4 time intervals belong to the same weather regime, assign this day to it + type[day] <- which(tt == 3) + } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrary assign the regime occurring at 12.00 of that day + type[day] <- hourly[3] + } + } + + # if there are three different regimes, assign it to the only possible regime with 2 time steps in that day: + if(length(unique(hourly)) == 3) type[day] <- which(tt == 2) + + # if there are four different regimes (a very rare event!), assign it to the regime occurring at 12.00 of that day: + if(length(unique(hourly)) == 3) type[day] <- hourly[3] + + } # close for on day + + } # close for on subdaily + + + if(sequences){ # it works only for rean data!!! 
+ # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days: + # warning: first 4 days and last 4 days are not take into account y the algorithm and are treated as not belonging to any sequence (sequ=FALSE) + wrdiff <- diff(my.cluster$cluster) + + sequ <- rep(FALSE, n.days.period[[p]]) + for(i in 5:(n.days.period[[p]]-5)){ + sequ[i] <- TRUE + if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE + if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE + } # close for loop on i + + # sequence of daily cluster numbers without the days that doesn't belong to sequences of 5 or more days: + cluster.sequence <- my.cluster$cluster * sequ + rm(wrdiff, sequ) + } + + + # Replace the days belonging to a regime with the days belonging to a sequence of at least 5 days of that regime: + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + # yearly frequency of each regime: + wr1y <- wr2y <- wr3y <-wr4y <- c() + + mod.subdaily <- ifelse(subdaily,4,1) + np <- n.days.in.a.period(p,1)*mod.subdaily + + for(y in year.start:year.end){ + # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc, + wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4)) + } + + # convert to frequencies in %: + wr1y <- wr1y/np + wr2y <- wr2y/np + wr3y <- wr3y/np + wr4y <- wr4y/np + + gc() + + + # measure regime anomalies: + pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon)) + + if(running.cluster || running.15){ + pslmat.new <- pslmat + pslmat <- pslmat.new[days.month.full,,] + rm(pslmat.new) + gc() + } + + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + # regime structure: + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day to save for the measure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + + # save all the data necessary to redraw the graphs when we know the right regime: + save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, 
n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y,wr2y,wr3y,wr4y,wr1,wr2,wr3,wr4, LOESS, running.cluster, running.15, lat.weighting, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData")) + + ## immediatly save the plots of the ERA-Interim monthly regime anomalies with the running cluster instead than loading them in the next script: + EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict area to continental europe only + + if(psl == "g500"){ + my.brks <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR + my.brks2 <- c(-300,seq(-200,200,10),300) # % Mean anomaly of a WR for the contour lines + values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels + } + + if(psl == "psl"){ + my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # % Mean anomaly of a WR + my.brks2 <- my.brks #c(seq(-20,20,2)) # % Mean anomaly of a WR for the contour lines + values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17) + } + my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors + my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" + + map1 <- pslwr1mean + map2 <- pslwr2mean + map3 <- pslwr3mean + map4 <- pslwr4mean + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200) + PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F") + dev.off() + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + if(fields.name == ECMWF_S4.name){ + cat("Computing regime anomalies and frequencies. 
Please wait...\n") + + #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData")) + + cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array) + + wr1 <- which(cluster.sequence == 1) + wr2 <- which(cluster.sequence == 2) + wr3 <- which(cluster.sequence == 3) + wr4 <- which(cluster.sequence == 4) + + wr1y <- apply(my.cluster.array == 1, 2, sum) + wr2y <- apply(my.cluster.array == 2, 2, sum) + wr3y <- apply(my.cluster.array == 3, 2, sum) + wr4y <- apply(my.cluster.array == 4, 2, sum) + + # convert to frequencies in %: + wr1y <- wr1y/(num.leadtimes*n.members) # in this case, n.leadtimes is the number of days of one month of the cluser time series + wr2y <- wr2y/(num.leadtimes*n.members) + wr3y <- wr3y/(num.leadtimes*n.members) + wr4y <- wr4y/(num.leadtimes*n.members) + + # same as above, but to measure the maximum frequence between all the members: + wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max) + wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max) + wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max) + wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max) + + wr1yMax <- wr1yMax/num.leadtimes + wr2yMax <- wr2yMax/num.leadtimes + wr3yMax <- wr3yMax/num.leadtimes + wr4yMax <- wr4yMax/num.leadtimes + + wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min) + wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min) + wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min) + wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min) + + wr1yMin <- wr1yMin/num.leadtimes + wr2yMin <- wr2yMin/num.leadtimes + wr3yMin <- wr3yMin/num.leadtimes + wr4yMin <- wr4yMin/num.leadtimes + + cat("Computing regime anomalies and frequencies. Please wait......\n") + + # in this case, must use psl.melted instead of psleuFull. 
Both are already in anomalies: + # (psl.melted depends on the startdate and leadtime, psleuFull no) + gc() + pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon)) + #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon)) + gc() + + #for(a in 1:2){ + # for(b in 1:3){ + # for(c in 1:4){ + # pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,] + # } + # } + #} + + pslwr1 <- pslmat[wr1,,, drop=F] + pslwr2 <- pslmat[wr2,,, drop=F] + pslwr3 <- pslmat[wr3,,, drop=F] + pslwr4 <- pslmat[wr4,,, drop=F] + + pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T) + pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T) + pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T) + pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T) + + # spatial mean for each day to save for the meaure of the FairRPSS: + pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T) + pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T) + pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T) + pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T) + + rm(pslmat) + gc() + + # save all the data necessary to draw the graphs (but not the impact maps) + save(my.cluster, cluster.sequence, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir,rean.name,fields.name, year.start,year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, wr1, wr2, wr3, wr4, n.leadtimes, num.leadtimes, LOESS, running.cluster, running.15, lat.weighting, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData")) + + rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean) + rm(pslwr1,pslwr2,pslwr3,pslwr4) + gc() + + } + + # for the monthly system, the time order is always: year1day1, year2day1, ... 
, yearNday1, year1day2, year2day2, ..., yearNday2, etc.: + if(fields.name == ECMWF_monthly.name) { + if(fields.name == ECMWF_monthly.name){ + my.startdates.period <- months.period(forecast.year,mes,day,p) + + #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken + + days.period <- period.length <- list() + for(startdate in my.startdates.period){ + days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1))) + } + period.length[[p]] <- length(my.startdates.period) # number of Thusdays in the period + } + + wr1y <- wr2y <- wr3y <-wr4y <- c() + wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1)) + wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2)) + wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3)) + wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4)) + } + + cat("Finished!\n") +} # close the for loop on 'p' + + + + + + + + +teleconextion <- FALSE +if(teleconexion){ + + ## bypass the years setted in the script header: + year.start <- 1981 + year.end <- 2015 #2016 + + n.years <- year.end - year.start + 1 + + ## load monthly psl: + pslFull <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'monthly', leadtimemax = 12, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1) + + if(psl == "psl") pslFull$obs <- pslFull$obs/100 # convert MSLP in Pascal to MSLP in hPa + + pslFullClim <- apply(pslFull$obs, c(1,2,4,5,6), mean, na.rm=T) + + pslFullSd <- apply(pslFull$obs, c(1,2,4,5,6), sd, na.rm=T) + + pslFullClim2 <- InsertDim(pslFullClim,3,n.years) + pslFullSd2 <- InsertDim(pslFullSd,3,n.years) + + ## monthly standarized anomalies: + pslAnom <- ( pslFull$obs - pslFullClim2) / pslFullSd2 + pslAnom <- pslAnom[1,1,,,,] + + ##pslAnom.melted <- melt(pslAnom[,,,, drop=FALSE], varnames=c("Year","Month","Lat","Lon")) + ##pslAnom2 <- unname(acast(pslAnom.melted, Year + Month ~ Lat ~ Lon)) # order: year1 month1, year1 month2, etc + gc() + + ## load monthly time series of teleconnection indices: + NAO <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/NAO.txt", header=FALSE) + EA <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/EA.txt", header=FALSE) + EAWR <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/EAWR.txt", header=FALSE) + SCAND <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/SCAND.txt", header=FALSE) + + names(NAO) <- names(EA) <- names(EAWR) <- names(SCAND) <- c("year","month","index") + + # select only teleconnection in the chosen years: + row.start1 <- which(NAO[,1] == year.start)[1] + row.end1 <- which(EA[,1] == year.end)[l(which(EA[,1] == year.end))] + row.start2 <- which(EA[,1] == year.start)[1] + row.end2 <- which(EA[,1] == year.end)[l(which(EA[,1] == year.end))] + row.start3 <- which(EAWR[,1] == year.start)[1] + row.end3 <- which(EAWR[,1] == year.end)[l(which(EAWR[,1] == year.end))] + row.start4 <- which(SCAND[,1] == year.start)[1] + row.end4 <- which(SCAND[,1] == year.end)[l(which(SCAND[,1] == year.end))] + + NAOy <- 
NAO[row.start1:row.end1,] + EAy <- EA[row.start2:row.end2,] + EAWRy <- EAWR[row.start3:row.end3,] + SCANDy <- SCAND[row.start4:row.end4,] + + ## month <- 1:12 + ## for(m in month){ + ## # select only teleconnection data in month m: + ## rows <- which(NAOy[,2] == m) + + ## NAOm <- NAOy[rows,] + ## EAm <- EAy[rows,] + ## EAWRm <- EAWRy[rows,] + ## SCANDm <- SCANDy[rows,] + ## } + + NAO.map <- EA.map <- EAWR.map <- SCAND.map <- NAO.map.corr <- EA.map.corr <- EAWR.map.corr <- SCAND.map.corr <- array(NA, c(12,dim(pslAnom)[3:4])) + + ## create teleconnection maps: + + month <- 1:12 + for(m in month){ + #NAO.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * NAOy[which(NAOy[,2] == m),3])) + #EA.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * EAy[which(NAOy[,2] == m),3])) + #EAWR.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * EAWRy[which(EAWRy[,2] == m),3])) + #SCAND.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * SCANDy[which(SCANDy[,2] == m),3])) + + NAO.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, NAOy[which(NAOy[,2] == m),3], use = "na.or.complete")) + EA.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, EAy[which(NAOy[,2] == m),3], use = "na.or.complete")) + EAWR.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, EAWRy[which(EAWRy[,2] == m),3], use = "na.or.complete")) + SCAND.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, SCANDy[which(SCANDy[,2] == m),3], use = "na.or.complete")) + } + + ## NAO.map <- apply(pslAnom2, c(2,3), function(x) mean(x*NAOy[,3])) + ## NAO.map.corr <- apply(pslAnom2, c(2,3), function(x) cor(x,NAOy[,3])) + + #my.brks <- c(-100,seq(-1,1,0.1),100) + #my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors + #my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" ## add white in the middle + + my.brks.corr <- c(-1, -0.6, -0.45, -0.3, -0.15, 0, 0.15, 0.30, 0.45, 0.60, 1) #seq(-1,1,0.1) + my.cols.corr <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.corr)-1) # blue--red colors + my.cols.corr[floor(length(my.cols.corr)/2)] <- my.cols.corr[floor(length(my.cols.corr)/2)+1] <- "white" ## add white in the middle + + fileoutput <- paste0(workdir,"/",fields.name,"_teleconexions.png") + + png(filename=fileoutput,width=15000,height=3700) + + plot.new() + + n.map <- 0 + month <- c(9:12, 1:8) + for(m in month){ + n.map <- n.map+1 + y1 <- 0.10 + y3 <- 0.315 + y5 <- 0.53 + y7 <- 0.745 + y.width <- 0.18 + + y2 <- y1 + y.width; y4 <- y3 + y.width; y6 <- y5 + y.width; y8 <- y7 + y.width + yt1 <- y2+0.003; yt3 <- y4+0.003; yt5 <- y6+0.003; yt7 <- y8+0.003 + yt2 <- yt1 + 0.004; yt4 <- yt3 + 0.005; yt6 <- yt5 + 0.005; yt8 <- yt7 + 0.005 + + ## ## Teleconnection maps: + ## map.xpos <- 0.00 + ## map.width <- 0.46 + ## par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE) + ## PlotEquiMap2(NAO.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(NAO.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + ## par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE) + ## PlotEquiMap2(EA.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(EA.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + ## par(fig=c(map.xpos, 
map.xpos + map.width, y3, y4), new=TRUE) + ## PlotEquiMap2(EAWR.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(EAWR.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + ## par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE) + ## PlotEquiMap2(SCAND.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(SCAND.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + + ## Correlation maps: + + map.width <- 0.08 + map.xpos <- 0.03 + map.width * (n.map-1) + par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE) + PlotEquiMap2(NAO.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(NAO.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE) + PlotEquiMap2(EA.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(EA.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE) + PlotEquiMap2(EAWR.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(EAWR.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE) + PlotEquiMap2(SCAND.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(SCAND.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black") + + ## Title Centroid Maps: + title1.width <- 0.08 + title1.xpos <- 0.03 + (n.map-1) * title1.width + + par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9, 0.95), new=TRUE) + mtext(my.period[m], font=2, cex=8) + + + } ## close for on month m + + ## Legend: + legend.xpos <- 0.3 + legend.width <- 0.4 + legend.cex <- 6 + + par(fig=c(legend.xpos, legend.xpos + legend.width, 0.035, 0.095), new=TRUE) + ColorBar(brks=round(my.brks.corr,2), cols=my.cols.corr, vert=FALSE, label_scale=6, subsample=1, triangle_ends=c(FALSE,FALSE)) + ##mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex, las=1) + ##mtext(side=4,"cor", cex=legend2.cex, las=1) + + yt1 <- yt1 - 0.1 + yt3 <- yt3 - 0.1 + yt5 <- yt5 - 0.13 + yt7 <- yt7 - 0.14 + par(fig=c(0, 0.03, yt7+0.0025, yt7+0.0055), new=TRUE) + mtext("NAO", font=2, cex=10) + par(fig=c(0, 0.03, yt5+0.0025, yt5+0.0055), new=TRUE) + mtext("EA", font=2, cex=10) + par(fig=c(0, 0.03, yt3+0.0025, yt3+0.0055), new=TRUE) + mtext("EAWR", font=2, cex=10) + par(fig=c(0, 0.03, yt1+0.0025, yt1+0.0055), new=TRUE) + mtext("SCAND", font=2, cex=10) + + dev.off() + +} # close if on teleconexion + + + + + + + + + + + + + + + + + + + diff --git a/weather_regimes_v43.R~ b/weather_regimes_v43.R~ new file mode 100644 index 0000000000000000000000000000000000000000..2e5cc133c565d9d6e44f7c2d1959e66af0b9f4d4 --- /dev/null +++ b/weather_regimes_v43.R~ @@ -0,0 +1,1344 @@ + +# Creation: 6/2016 +# 
Author: Nicola Cortesi +# Aim: To compute the four Euro-Atlantic weather regimes from a reanalysis or from a forecast system +# +# I/O: input must be in a format compatible with Load(); 1 output .RData file is created in the 'workdir' folder. +# You can also run this script from terminal with: Rscript ~/scripts/weather_regimes_maps.R +# +# Assumption: its output are needed by the scripts weather_regimes_impact.R and weather_regimes.R +# +# Branch: weather_regimes + +rm(list=ls()) +library(s2dverification) # for the function Load() +library(abind) +library(reshape2) # after each update of s2dverification, it's better to remove and install this package again because sometimes it gets broken! + +SMP <- FALSE # if TRUE, the script is assumed to run on the SMP Machine +if(SMP){ + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir where output files will be generated +} else { + source('/home/Earth/ncortesi/scripts/Rfunctions.R') + workdir <- "/scratch/Earth/ncortesi/RESILIENCE/Regimes" # working dir where output files will be generated +} + +# module load NCO # : load it from the terminal before running this script interactively in the SMP machine, if you didn't load it in the .bashrc + +f6h <- TRUE # set if suffix "_f6h" is present in the file name or not (set it always to TRUE except if rean = JRA55 and psl = "z500") + +# available reanalysis for the geopotential (g500) or the geopotential height (z500 = g500/9.81) and for var data: +ERAint <- '/esnas/recon/ecmwf/erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +#ERAint <- '/esarchive/old-files/recon_ecmwf_erainterim/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +ERAint <- '/esnas/recon/ecmwf/erainterim/6hourly/$VAR_NAME$/$VAR_NAME$_$YEAR$$MONTH$.nc' # subdaily data!!!!! 
+ERAint.name <- "ERA-Interim" + +JRA55 <- '/esnas/recon/jma/jra55/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +JRA55.name <- "JRA-55" + +NCEP <- '/esnas/recon/noaa/ncep-reanalysis/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$YEAR$$MONTH$.nc' +NCEP.name <- "NCEP" + +rean <- JRA55 #JRA55 #ERAint #NCEP # choose one of the two above reanalysis from where to load the input psl data + +# available forecast systems for the psl and var data: +#ECMWF_S4 <- list(path = paste0('/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc')) +#ECMWF_S4 <- '/esnas/exp/ECMWF/seasonal/0001/s004/m001/$STORE_FREQ$_mean/psl_f6h/psl_$START_DATE$.nc' +ECMWF_S4 <- '/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc' +ECMWF_S4.name <- "ECMWF-S4" +member.name <- "ensemble" # name of the dimension with the ensemble members inside the netCDF files of S4 + +ECMWF_monthly <- '/esnas/exp/ECMWF/monthly/ensforhc/6hourly/$VAR_NAME$/2014$MONTH$$DAY$00/$VAR_NAME$_$START_DATE$00.nc' +ECMWF_monthly.name <- "ECMWF-Monthly" + +# choose a forecast system: +forecast <- ECMWF_S4 + +# put 'rean' to load pressure fields and var from reanalisis, or put 'forecast' to load them from forecast systems: +fields <- rean #forecast + +psl <- "psl" # "psl" #"g500" # pressure variable to use from the chosen reanalysis +psl.name <- "SLP" # "Z500" # the name to show in the maps + +year.start <- 1981 #1979 #1981 #1982 #1981 #1994 #1979 #1981 +year.end <- 2017 #2017 #2015 + +year.end.clim <- year.end - 1 # last year for computation of climatology (unless year.end has data available for all months, it's better to set it to the previous year) + +LOESS <- TRUE # To apply the LOESS filter or not to the climatology +running.cluster <- FALSE # add to the clustering also the daily SLP data of the two closer months to the month to use (only for monthly analysis) +running.15 <- FALSE # add to the clustering also the daily SLP data of the 15 days of the two closer months (only for monthly analysis). You cannot set to TRUE both + # this variable and 'running.cluster' above +lat.weighting <- TRUE # set it to true to weight psl data on latitude before applying the cluster analysis +subdaily <- FALSE # id TRUE, compute the clustering using 6-hourly data instead of daily data, to be more robust (only for reanalysis with 6-hourly data avail.) + +sequences <-FALSE # set it to TRUE if you want to select only days beloning to sequences of at least 5 consecutive days belonging to the same regime. +PCA <- FALSE # set it to TRUE if you want to apply the PCA before the k-means cluster analysis, FALSE otherwise +variance.explained <- 0.8 # the user can set here the minimum % of variance explained by the PCs (typically 0.8 which is equal to 80%) + +# Coordinates of the box enclosing the study region (the Northern Atlantic in this case): +lat.max <- 81 #81 #80 #70 +lat.min <- 27 #30 #20 #30 +lon.max <- 45 #36 #30 #40 +lon.min <- 274.5 #274.5 #270 #280 # put a positive number here because the geopotential has only positive vaues of longitud! 
+ +# Only for reanalysis: +WR.period <- 2 # 13:16 #1:12 #13:16 # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn [bypassed by the optional argument of the script] + +# Only for Seasonal forecasts: +startM <- 1 # start month (1=January, 12=December) [bypassed by the optional arguments of the script] +leadM <- 0 # select only the data of one lead month: [bypassed by the optional arguments of the script] +n.members <- 15 # number of members of the forecast system to load +n.leadtimes <- 216 # number of leadtimes of the forecast system to load +missing.forecasts <- FALSE # set it to TRUE if there can be missing hindcasts files in the forecast psl data, so it will load only the available years and deals with NA +res <- 0.75 # set the resolution you want to interpolate the seasonal psl and var data when loading it (i.e: set to 0.75 to use the same res of ERA-Interim) + # interpolation method is BILINEAR. + +# Only for Monthly forecasts: +#leadtime <- 1:7 #5:11 #1 # if fields=forecast, set the forecast time (in days) you want to compute the weather regimes. +#startdate.shift <- 1 # if leadtime=5:11, it's better to shift all startdates of the season 1 week in the past, to fit the exact season of obs.data + # if leadtime=12:18, it's better to shift all startdates of the season 2 weeks in the past, and so on. +#forecast.year <- year.end+1 # if fields=forecast, set the forecast year (it is usually the year after year.end) +#mes=1 # if fields=forecast, set the month of the first startdate of the forecast year +#day=2 # if fields=forecast, set the day of the first startdate of the forecast year + +############################################################################################################################# +rean.name <- unname(ifelse(rean == ERAint, ERAint.name, ifelse(rean == JRA55, JRA55.name, NCEP.name))) +forecast.name <- unname(ifelse(forecast == ECMWF_S4, ECMWF_S4.name, ECMWF_monthly.name)) +fields.name <- unname(ifelse(fields == rean, rean.name, forecast.name)) +n.years <- year.end - year.start + 1 +n.years.clim <- year.end.clim - year.start + 1 + +if(running.cluster && running.15) stop("both 'running.cluster' and 'running.15' cannot be TRUE simultaneously") + +if(!f6h) { pos <- regexpr("_f6h", fields); my.string <- strsplit(fields, "_f6h") ; fields <- paste0(my.string[[1]][1],my.string[[1]][2]) } # remove _f6h from filename + +# in case the script is run with one argument, it is assumed a reanalysis is being used and the argument becomes the month we want to perform the clustering; +# in case the script is run with two arguments, it is assumed a forecast is used and the first argument represents the startmonth and the second one the leadmonth. 
+# in case the script is run with no arguments, the values of the variables inside the script are used: +script.arg <- as.integer(commandArgs(TRUE)) + +if(length(script.arg) == 0){ + start.month <- startM + #WR.period <- start.month + if(fields.name == forecast.name) lead.month <- leadM +} + +# in case the script is run with 1 argument, it is assumed you are using a reanalysis: +if(length(script.arg) == 1){ + fields <- rean + fields.name <- rean.name + WR.period <- script.arg[1] +} + +if(length(script.arg) >= 2){ + fields <- forecast + fields.name <- forecast.name + start.month <- script.arg[1] + lead.month <- script.arg[2] + WR.period <- start.month +} + +if(length(script.arg) == 3){ # in this case, we are running the script in the SMP machine and we need to override the variables below: + source('/gpfs/projects/bsc32/bsc32842/Rfunctions.R') # for the calendar function + workdir <- "/gpfs/projects/bsc32/bsc32842/RESILIENCE" # working dir +} + +if(length(script.arg) >= 2) {lead.comment <- paste0("Lead month: ", lead.month)} else {lead.comment <- ""} +cat(paste0("Field: ", fields.name, " Period: ", WR.period, " ", lead.comment, "\n")) + +if(lat.min >= lat.max) stop("lat.min cannot be greater than lat.max") + +#days.period <- n.days.period <- period.length <- list() +#for (pp in 1:17){ +# days.period[[pp]] <- NA +# for(y in year.start:year.end) days.period[[pp]] <- c(days.period[[pp]], n.days.in.a.future.year(year.start, y) + pos.period(y,pp)) +# days.period[[pp]] <- days.period[[pp]][-1] # remove the NA at the begin that was introduced only to be able to execute the above command +# # number of days belonging to that period from year.start to year.end: +# n.days.period[[pp]] <- length(days.period[[pp]]) +# # Number of days belonging to that period in a single year: +# period.length[[pp]] <- n.days.in.a.period(pp,1999) #+ ifelse(period==13 | period==2, 1, 0) # using year 1999 introduce a small error in winter season length because of #bisestile years +#} + +# Create a regular lat/lon grid to interpolate the data to the chosen res: (Notice that the regular grid is always centered at lat=0 and lon=0!) +n.lon.grid <- 360/res +n.lat.grid <- (180/res)+1 +my.grid <- paste0('r',n.lon.grid,'x',n.lat.grid) +#domain<-list() +#domain$lon <- seq(0,359.999999999,res) +#domain$lat <- c(rev(seq(0,90,res)),seq(res[1],-90,-res)) + +cat("Loading lat/lon. 
Please wait...\n") +# load only 1 day of pressure data to detect the minimum and maximum lat and lon values which identify only the North Atlantic area: +if(fields.name == rean.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0101'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat', nprocs=1) + +### Real test: +### lat <- +### domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates=paste0(1982:2011,'0101'), storefreq = 'daily', leadtimemax=n.leadtimes, nmember=n.members, output = 'lonlat', latmin = lat[chunk], latmax = lat[chunk+2], nprocs=1) + +### if (fields.name="pippo"){ + +# in the case of S4, we also interpolate it to the same resolution of the reanalysis: +# (notice that if the S4 grid has the same grid of the chosen grid (typically ERA-Interim), Load() leaves the S4 grid unchanged) +if(fields.name == ECMWF_S4.name) domain <- Load(var = psl, exp = list(list(path=fields)), obs=NULL, sdates=paste0(year.start,'0101'), storefreq = 'daily', dimnames=list(member=member.name), leadtimemax=1, nmember=1, output = 'lonlat', grid=my.grid, method='bilinear', nprocs=1) + +#domain <- Load(var = "psl", exp = list(list(path="/esnas/exp/ecmwf/system4_m1/$STORE_FREQ$_mean/$VAR_NAME$_f6h/$VAR_NAME$_$START_DATE$.nc")), obs=NULL, sdates='19820101', storefreq = 'daily', leadtimemax=1, nmember=1, output = 'lonlat', grid='r480x241', lonmax=100) + +if(fields.name == ECMWF_monthly.name) domain <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start,'0102'), storefreq = 'daily', leadtimemax = 1, output = 'lonlat') + +n.lon <- length(domain$lon) +n.lat <- length(domain$lat) + +pos.lat.max <- which(lat.max - round(domain$lat,4) >= 0)[1] # select the first latitude of the domain below the maximum latitude +pos.lat.min <- tail(which(round(domain$lat,4) - lat.min >= 0),1) # select the last latitude of the domain over the minumum latitude +pos.lon.max <- tail(which(lon.max - round(domain$lon,4) >= 0),1) +pos.lon.min <- which(round(domain$lon,4) - lon.min >= 0)[1] + +pos.lat <- if(pos.lat.min < pos.lat.max) {pos.lat.min:pos.lat.max} else {pos.lat.max:pos.lat.min} +pos.lon <- c(1:pos.lon.max,pos.lon.min:length(domain$lon)) # beware that it only consider the case where the study area cross the meridian of Greenwhich! +n.pos.lat <- length(pos.lat) +n.pos.lon <- length(pos.lon) + +lat <- domain$lat[pos.lat] # lat values of chosen area only (it is smaller than the whole spatial domain loaded by the data) +#if (lat[2] < lat[1]) lat <- rev(lat) # reverse lat values if they are in decreasing order +lon <- domain$lon[pos.lon] # lon values of chosen area only + +#lat.min.area <- domain$lat[pos.lat.min] # min and max lat/lon of the chosen area only (same as lat.max, but its values correspond to actual values in the lat vector) +#lat.max.area <- domain$lat[pos.lat.max] +#lon.min.area <- domain$lon[pos.lon.min] +#lon.max.area <- domain$lon[pos.lon.max] + +#rm(domain) + +#################### Start analysis #######################################################3 + +# Load psl data (interpolating it to the same reanalysis grid, if necessary): +cat("Loading data. 
Please wait...\n") + +if(fields.name == rean.name && subdaily == FALSE){ # Load daily psl data of all years in the reanalysis case: + psleuFull <-array(NA,c(1, 1, n.years, 365, n.pos.lat, n.pos.lon)) + + psleuFull366 <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'daily', leadtimemax = 366, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=8) + + # remove bisestile days to have all arrays with the same dimensions: + cat("Removing bisestile days. Please wait...\n") + psleuFull[,,,,,] <- psleuFull366$obs[,,,1:365,,] + + for(y in year.start:year.end){ + y2 <- y - year.start + 1 + if(n.days.in.a.year(y) == 366) psleuFull[,,y2,60:365,,] <- psleuFull366$obs[,,y2,61:366,,] # take the march to december period removing the 29th of February + } + + psleuFull366$obs <- NULL + gc() + + if((running.cluster || running.15) && fields == rean) { + ## the months after the last month of the last year loaded are automatically filled with NA by Load(); the clustering algorithm doesn't want NA, so + ## we have to replace them with some values (in this case the mean of the previous years not to affect the analysis too much) + ## if we want to be able to compute the running clustering. + ## WARNING: in case your last month is December, for the running clustering it will use the january data of the last year loaded, not of the next year: + #psleuFull[,,n.years,305:334,,] <- psleuFull[,,n.years-1,305:334,,] + month.NA <- psleuFull366$not_found_files[1] # filename of the file with the first month with NA (string value with full file path) + first.month.NA <- as.integer(substr(month.NA,nchar(month.NA)-4,nchar(month.NA)-3)) # first month with NA + first.day.NA <- pos.period(1,first.month.NA)[1] # first day with NA (counting from the start of the year) + + psleuFullnolast <- psleuFull[,,1:(n.years-1),first.day.NA:365,,] + psleuFullMean <- apply(psleuFullnolast, c(2,3,4), mean, na.rm=TRUE) # mean of the previous years + + psleuFull[,,n.years,first.day.NA:365,,] <- psleuFullMean + + rm(psleuFullnolast, psleuFullMean) + + } + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. 
Please wait...\n") + psleuFull_no_last_year <- psleuFull[1,1,1:n.years.clim,,,,drop=FALSE] + pslPeriodClim <- apply(psleuFull_no_last_year, c(1,2,4,5,6), mean, na.rm=T) + rm(psleuFull_no_last_year) + gc() + + if(LOESS == TRUE){ + pslPeriodClimLoess <- array(NA, dim(pslPeriodClim)) + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,1,,i,j], day=1:365) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 3, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClim2 + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean and subdaily == FALSE){ + + +# Load 6-hourly psl data of all years in the reanalysis case: +if(fields.name == rean.name && subdaily == TRUE){ + + #sdates <- as.vector(sapply(year.start:year.end, function(x) paste0(x, sprintf("%02d", 1:12), '01'))) + #my.exp <- list(path=fields) + #psleuFull366 <- Load(var = psl, exp = list(my.exp), NULL, sdates, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$obs + #dim(data$obs) <- c(dim(data$obs)[1:2], 1, dim(data$obs)[3]*dim(data$obs)[4], dim(data$obs)[5:6]) + + my.exp <- list(path=fields) + + # Load January data: + psleu1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + + # Load february data (it automatically discards the 29th of February): + psleu2 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + + # Load March data: + psleu3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0301'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu4 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0401'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu5 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0501'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu6 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0601'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu7 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0701'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu8 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0801'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = 
lon.min, lonmax = lon.max)$mod + psleu9 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'0901'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu10 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1001'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu11 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1101'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + psleu12 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates = paste0(year.start:year.end,'1201'), storefreq = 'daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max)$mod + + if(running.cluster == TRUE && rean.name == "ERA-interim") { + # in this case, last month of last year might not have in /esnas the data for the 3rd month of the running cluster. Load() set these values to NA, + # but the clustering analysis is not able to process NA, so we have to remove the NA for that month and replace with the values of the previous year: + psleu11[,,n.years,,,] <- psleu11[,,n.years-1,,,] + psleu12[,,n.years,,,] <- psleu12[,,n.years-1,,,] + + } + + psleuFull <- abind(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12, along=4) + rm(psleu1, psleu2, psleu3, psleu4, psleu5, psleu6, psleu7, psleu8, psleu9, psleu10, psleu11, psleu12) + gc() + + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + pslPeriodClim <- apply(psleuFull, c(1,2,4,5,6), mean, na.rm=T) + + if(LOESS == TRUE){ + # separate psl data in the four hours of the day ( 0.00, 6.00, 12.00, 18.00) + pslPeriodClim1 <- pslPeriodClim[1,1,seq(1,1460,4),,] + pslPeriodClim2 <- pslPeriodClim[1,1,seq(2,1460,4),,] + pslPeriodClim3 <- pslPeriodClim[1,1,seq(3,1460,4),,] + pslPeriodClim4 <- pslPeriodClim[1,1,seq(4,1460,4),,] + + rm(pslPeriodClim) + gc() + + pslPeriodClimLoess1 <- array(NA, dim(pslPeriodClim1)) + pslPeriodClimLoess2 <- array(NA, dim(pslPeriodClim2)) + pslPeriodClimLoess3 <- array(NA, dim(pslPeriodClim3)) + pslPeriodClimLoess4 <- array(NA, dim(pslPeriodClim4)) + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[,i,j], hourly=1:(1460/4)) + my.loess1 <- loess(ens.mean ~ hourly, my.data1, span=0.35) + pslPeriodClimLoess1[,i,j] <- predict(my.loess1) + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[,i,j], hourly=1:(1460/4)) + my.loess2 <- loess(ens.mean ~ hourly, my.data2, span=0.35) + pslPeriodClimLoess2[,i,j] <- predict(my.loess2) + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[,i,j], hourly=1:(1460/4)) + my.loess3 <- loess(ens.mean ~ hourly, my.data3, span=0.35) + pslPeriodClimLoess3[,i,j] <- predict(my.loess3) + + my.data4 <- data.frame(ens.mean=pslPeriodClim4[,i,j], hourly=1:(1460/4)) + my.loess4 <- loess(ens.mean ~ hourly, my.data4, span=0.35) + pslPeriodClimLoess4[,i,j] <- predict(my.loess4) + + } + } + + rm(my.data1, my.data2, my.data3, my.data4, my.loess1, my.loess2, my.loess3, my.loess4) + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3, pslPeriodClim4) + gc() + + s1 <- seq(1,1460,4) + s2 <- seq(2,1460,4) + s3 <- seq(3,1460,4) + s4 <- seq(4,1460,4) + + pslPeriodClimLoess <- 
array(NA,c(365*4,dim(pslPeriodClimLoess1)[2:3])) + + for(day in 1:365){ + pslPeriodClimLoess[s1[day],,] <- pslPeriodClimLoess1[day,,] + pslPeriodClimLoess[s2[day],,] <- pslPeriodClimLoess2[day,,] + pslPeriodClimLoess[s3[day],,] <- pslPeriodClimLoess3[day,,] + pslPeriodClimLoess[s4[day],,] <- pslPeriodClimLoess4[day,,] + } + + pslPeriodClim2 <- InsertDim(pslPeriodClimLoess, 1, n.years) + + } else { # leave the daily climatology to compute the anomalies: + pslPeriodClim2 <- InsertDim(pslPeriodClim, 3, n.years) + + rm(pslPeriodClim) + gc() + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull[1,1,,,,] - pslPeriodClim2 + + rm(pslPeriodClim2) + gc() + +} # close if on fields.name == rean & subdaily == TRUE + + +if(fields.name == ECMWF_S4.name){ # in the forecast case, we load only the data for 1 start month at time and all the lead times (since each month needs ~3 GB of memory) + if(start.month <= 9) {start.month.char <- paste0("0",as.character(start.month))} else {start.month.char <- start.month} + + fields2 <- gsub("\\$STORE_FREQ\\$","daily",fields) + fields3 <- gsub("\\$VAR_NAME\\$",psl,fields2) + + if(missing.forecasts == TRUE){ + years <- c() + for(y in year.start:year.end){ + fields4 <- gsub("\\$START_DATE\\$",paste0(y, start.month.char,'01'),fields3) + + char.lead <- nchar(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T)) + # beware, in this way it can only take into account leadtimes from 100 to 999, but it is the only way in the SMP machine: + #num.lead <- as.integer(substr(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '*' -f1"), intern=T),char.lead-3,char.lead)) + num.lead <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f1"), intern=T)) # don't work well on the SMP + num.memb <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f2"), intern=T)) + num.lat <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f3"), intern=T)) + num.lon <- as.integer(system(paste0("ncks -m ",fields4,"| grep -E -i 'psl size' | cut -d '=' -f2 | cut -d '*' -f4"), intern=T)) + + #if(num.lead == n.leadtimes && num.memb >= n.members && num.lat == 181 && num.lon == 360) years <- c(years,y) + if(num.lead == n.leadtimes && num.memb >= n.members) years <- c(years,y) + } + + } else { + years <- year.start:year.end + } + + n.years.full <- length(years) + + if(running.cluster == TRUE) { + + start.month1 <- ifelse(start.month == 1, 12, start.month-1) + if(start.month1 <= 9) {start.month1.char <- paste0("0",as.character(start.month1))} else {start.month1.char <- start.month1} + + start.month2.char <- start.month.char + + start.month3 <- ifelse(start.month == 12, 1, start.month+1) + if(start.month3 <= 9) {start.month3.char <- paste0("0",as.character(start.month3))} else {start.month3.char <- start.month3} + + # load three months of data, inserting all them in the third dimension of the array, one month at time: + psleuFull1 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month1.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull2 <- Load(var = 
psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month2.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + psleuFull3 <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month3.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear')$mod + gc() + + } else { + + psleuFull <- Load(var = psl, exp = list(list(path=fields)), obs = NULL, sdates=paste0(years, start.month.char,'01'), dimnames=list(member=member.name), nmember=n.members, leadtimemax=n.leadtimes, storefreq='daily', output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, grid=my.grid, method='bilinear', nprocs=1)$mod + + } + + if(LOESS == TRUE){ + # convert psl in daily anomalies with the LOESS filter: + cat("Calculating anomalies. Please wait...\n") + + if(running.cluster == TRUE) { + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess1 <- pslPeriodClim1 + pslPeriodClimLoess2 <- pslPeriodClim2 + pslPeriodClimLoess3 <- pslPeriodClim3 + + for(i in 1:n.pos.lat){ + for(j in 1:n.pos.lon){ + my.data1 <- data.frame(ens.mean=pslPeriodClim1[1,,i,j], day=1:n.leadtimes) + my.loess1 <- loess(ens.mean ~ day, my.data1, span=0.35) + pslPeriodClimLoess1[1,,i,j] <- predict(my.loess1) + rm(my.data1, my.loess1) + gc() + + my.data2 <- data.frame(ens.mean=pslPeriodClim2[1,,i,j], day=1:n.leadtimes) + my.loess2 <- loess(ens.mean ~ day, my.data2, span=0.35) + pslPeriodClimLoess2[1,,i,j] <- predict(my.loess2) + rm(my.data2, my.loess2) + gc() + + my.data3 <- data.frame(ens.mean=pslPeriodClim3[1,,i,j], day=1:n.leadtimes) + my.loess3 <- loess(ens.mean ~ day, my.data3, span=0.35) + pslPeriodClimLoess3[1,,i,j] <- predict(my.loess3) + rm(my.data3, my.loess3) + gc() + } + } + + rm(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3) + gc() + + pslPeriodClimDos1 <- InsertDim(InsertDim(pslPeriodClimLoess1, 2, n.years.full), 2, n.members) + pslPeriodClimDos2 <- InsertDim(InsertDim(pslPeriodClimLoess2, 2, n.years.full), 2, n.members) + pslPeriodClimDos3 <- InsertDim(InsertDim(pslPeriodClimLoess3, 2, n.years.full), 2, n.members) + + pslPeriodClimDos <- unname(abind(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3, along=3)) + rm(pslPeriodClimDos1, pslPeriodClimDos2, pslPeriodClimDos3) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else { # in case of no running cluster: + + pslPeriodClim <- apply(psleuFull, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClimLoess <- pslPeriodClim + for(i in n.pos.lat){ + for(j in n.pos.lon){ + my.data <- data.frame(ens.mean=pslPeriodClim[1,,i,j], day=1:n.leadtimes) + my.loess <- loess(ens.mean ~ day, my.data, span=0.35) + pslPeriodClimLoess[1,,i,j] <- predict(my.loess) + } + } + + rm(pslPeriodClim) + gc() + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClimLoess, 2, n.years.full), 2, n.members) + + } # close if on running cluster + + } else { #in case of no LOESS: + + if(running.cluster == TRUE) { + # in this case, the 
climatology is measured FOR EACH MONTH INDIPENDENTLY, instead of using a seasonal value: + pslPeriodClim1 <- apply(psleuFull1, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim2 <- apply(psleuFull2, c(1,4,5,6), mean, na.rm=T) + pslPeriodClim3 <- apply(psleuFull3, c(1,4,5,6), mean, na.rm=T) + + pslPeriodClim <- unname(abind(pslPeriodClim1, pslPeriodClim2, pslPeriodClim3)) + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + + psleuFull <- unname(abind(psleuFull1, psleuFull2, psleuFull3, along=3)) + rm(psleuFull1, psleuFull2, psleuFull3) + gc() + + } else {# in case of no running cluster: + + pslPeriodClimDos <- InsertDim(InsertDim(pslPeriodClim, 2, n.years.full), 2, n.members) + rm(pslPeriodClim) + gc() + } + } + + ## s4.data <- data.frame(s4=pslPeriodClim[1,], day=1:216) + ## s4.loess <- loess(s4 ~ day, s4.data, span=0.35) + ## s4.pred <- predict(s4.loess) + + psleuFull <- psleuFull - pslPeriodClimDos + rm(pslPeriodClimDos) + gc() + +} # close if on fields.name=forecasts.name + +if(fields.name == ECMWF_monthly.name){ + # Load 6-hourly psl data in the forecast case: + psleuFull <-array(NA, c(n.sdates*n.years, n.leadtimes, n.pos.lat, n.pos.lon)) # daily order: 19940102 19950102 ... 20130102 19940109 19950109 ... 20130109 ... + n.leadtimes <- length(leadtime) + sdates <- weekly.seq(forecast.year,mes,day) + n.sdates <- length(sdates) + + for (startdate in 1:n.sdates){ + for(lday in leadtime){ + var <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end, substr(sdates[startdate],5,6), substr(sdates[startdate],7,8) ), storefreq = 'daily', leadtimemin = lday*4-3, leadtimemax = lday*4, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max) + + mean.lday <- apply(var$obs, c(3,5,6), mean, na.rm=T) + rm(var) + gc() + + psleuFull[(1+(startdate-1)*n.years):(startdate*n.years), lday-leadtime[1]+1,,] <- mean.lday + rm(mean.lday) + gc() + } + } +} + +if(psl=="g500") psleuFull <- psleuFull/9.81 # convert geopotential to geopotential height (in m) +if(psl=="psl") psleuFull <- psleuFull/100 # convert MSLP in Pascal to MSLP in hPa +gc() + +#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) +#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData")) + +#save.image("/scratch/Earth/ncortesi/RESILIENCE/Regimes/test.R") + +# compute the cluster analysis: +my.PCA <- list() +my.cluster <- list() +my.cluster.array <- list() +tot.variance <- list() +n.pcs <- my.cluster <- c() + +#WR.period=1 # for the debug +for(p in WR.period){ + # Select only the data of the month/season we want: + if(fields.name == rean.name){ + + #pslPeriod <- psleuFull[days.period[[p]],,] # select only days in the chosen period (i.e: winter) + if(running.cluster) { + if(subdaily){ + my.hours <- sort(c(pos.month.extended(1,p)*4-3, pos.month.extended(1,p)*4-2, pos.month.extended(1,p)*4-1, pos.month.extended(1,p)*4)) + if(p == 1) my.hours <- c(1337:1460,1:236) + if(p == 12) my.hours <- c(1217:1460,1:124) + + pslPeriod <- psleuFull[,my.hours,,] # select all days in the period of 3 months centered on the target month p + + } else { + pslPeriod <- psleuFull[1,1,,pos.month.extended(1,p),,] # select all days in the period of 3 months centered on the target month p + } + + } + + if(running.15) pslPeriod <- psleuFull[1,1,,pos.month.extended15(1,p),,] + + ## if there isn't any kind of running clustering: + if(!running.cluster && !running.15) 
{ + + if(subdaily){ + my.hours <- sort(c(pos.month(1,p)*4-3, pos.month(1,p)*4-2, pos.month(1,p)*4-1, pos.month(1,p)*4)) + pslPeriod <- psleuFull[,my.hours,,] + } else { + pslPeriod <- psleuFull[1,1,,pos.period(1,p),,] # select only days in the chosen period (i.e: winter) + } + } + + # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!): + if(lat.weighting){ + lat.weighted <- cos(lat*pi/180)^0.5 + if(!running.cluster && !running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.period(2001,p))), 1, n.years) + if(running.cluster) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years) + if(running.15) lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended15(2001,p))), 1, n.years) + + pslPeriod <- pslPeriod * lat.weighted.array + + rm(lat.weighted.array) + gc() + } + + gc() + cat("Preformatting data for clustering. Please wait...\n") + psl.melted <- melt(pslPeriod[,,,, drop=FALSE], varnames=c("Year","Day","Lat","Lon")) + gc() + + cat("Preformatting data for clustering. Please wait......\n") + # this function eats much memory!!! + psl.kmeans <- unname(acast(psl.melted, Year + Day ~ Lat + Lon)) + #gc() + + + # select period data from psleuFull to use it as input of the cluster analysis: + #psl.kmeans <- pslPeriod + #dim(psl.kmeans) <- c(n.days.period[[p]], n.pos.lat*n.pos.lon) # convert array in a matrix! + #rm(pslPeriod, pslPeriod.weighted) + } + + # in case of S4 we don't need to select anything because we already loaded only 1 month of data! + if(fields.name == ECMWF_S4.name){ + if(running.cluster == TRUE && p < 13) { + # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-months running period + # generalizing the command below (which at present only work for the month of January an lead time 0): + + # Select DJF (lead time 0) for our case study, excluding 29 of february): + pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE] + + } else { + + # select data only for the startmonth and leadmonth to study: + cat("Extracting data. Please wait...\n") + chosen.month <- start.month + lead.month # find which month we want to select data + if(chosen.month > 12) chosen.month <- chosen.month - 12 + + # remove the 29th of February to have the same n. of elements for all years + i=0 + for(y in years){ + i=i+1 + leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y) + leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1 + #leadtime.max <- 61 + + if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1 + int.leadtimes <- leadtime.min:leadtime.max + num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes',which is the total number of days in the file!) 
+
+  # in case of S4 we don't need to select anything, because we already loaded only 1 month of data!
+  if(fields.name == ECMWF_S4.name){
+    if(running.cluster == TRUE && p < 13) {
+      # if you want to fully implement the running cluster of the monthly S4 data, you have to finish selecting automatically the 3-month running period,
+      # generalizing the command below (which at present only works for the month of January and lead time 0):
+
+      # select DJF (lead time 0) for our case study, excluding the 29th of February:
+      pslPeriod <- psleuFull[,,,1:90,,, drop=FALSE]
+
+    } else {
+
+      # select data only for the startmonth and leadmonth to study:
+      cat("Extracting data. Please wait...\n")
+      chosen.month <- start.month + lead.month # find from which month we want to select data
+      if(chosen.month > 12) chosen.month <- chosen.month - 12
+
+      # remove the 29th of February to have the same number of elements for all years:
+      i=0
+      for(y in years){
+        i=i+1
+        leadtime.min <- 1 + n.days.in.a.monthly.period(start.month, chosen.month, y) - n.days.in.a.month(chosen.month, y)
+        leadtime.max <- leadtime.min + n.days.in.a.month(chosen.month, y) - 1
+        #leadtime.max <- 61
+
+        if (n.days.in.a.month(chosen.month, y) == 29) leadtime.max <- leadtime.max - 1
+        int.leadtimes <- leadtime.min:leadtime.max
+        num.leadtimes <- leadtime.max - leadtime.min + 1 # number of days in the chosen month (different from 'n.leadtimes', which is the total number of days in the file!)
+        # by construction it is equal to: n.days.in.a.month(chosen.month, y)
+
+        if(y == years[1]) {
+          pslPeriod <- psleuFull[,,1,int.leadtimes,,,drop=FALSE]
+        } else {
+          pslPeriod <- unname(abind(pslPeriod, psleuFull[,,i,int.leadtimes,,,drop=FALSE], along=3))
+        }
+      }
+
+    }
+
+    # weight the pressure fields based on latitude (apply it to anomalies, not to absolute values!):
+    if(lat.weighting){
+      lat.weighted <- cos(lat*pi/180)^0.5
+      lat.weighted.array <- InsertDim(InsertDim(InsertDim(lat.weighted,2,n.pos.lon), 1, length(pos.month.extended(2001,p))), 1, n.years)
+      pslPeriod <- pslPeriod * lat.weighted.array ######### adapt!!!
+      rm(lat.weighted.array)
+      gc()
+    }
+
+    # note that the data order in psl.kmeans[,X] is: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28
+    gc()
+    cat("Preformatting data. Please wait...\n")
+    psl.melted <- melt(pslPeriod[1,,,,,, drop=FALSE], varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon"))
+    gc()
+
+    #save(psl.melted,file=paste0(workdir,"/psl_melted.RData"))
+
+    cat("Preformatting data. Please wait......\n")
+    # warning: this function uses a lot of memory!
+    psl.kmeans <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat + Lon))
+    #gc()
+
+    #save(psl.kmeans,file=paste0(workdir,"/psl_kmeans.RData"))
+    #my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+    #save(my.cluster,file=paste0(workdir,"/my_cluster.RData"))
+
+    #psl.kmeans <- array(NA,c(dim(pslPeriod)))
+    #n.rows <- nrow(psl.melted)
+    #a <- psl.melted$Member
+    #b <- psl.melted$StartYear
+    #c <- psl.melted$LeadDay
+    #d <- psl.melted$Lat
+    #e <- psl.melted$Lon
+    #f <- psl.melted$value
+
+    # this way of converting a data.frame into an array uses much less memory than acast, but it is a bit slower:
+    #for(i in 1:n.rows) psl.kmeans[1,a[i],b[i],c[i],d[i],e[i]] <- f[i]
+    gc()
+    #rm(pslPeriod); gc()
+  }
+
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p) # get which startdates belong to the chosen period (remember that there are no leap days left!)
+
+    days.period <- list() # get which days inside psleuFull belong to the chosen period
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*n.years):(startdate*n.years))
+    }
+
+    # in winter you must remove the 2nd startdate (20140109) because its data is bad, as you can see with: plot(psl.kmeans[,1:2])
+    # if(fields.name == forecast.name && period == 13) psl.kmeans[21:40,] <- NA
+  }
+
+  # compute the clustering:
+  cat("Clustering data. Please wait...\n")
+  if(PCA){
+    my.seq <- seq(1, n.pos.lat*n.pos.lon, 16) # subsample the grid, keeping only 1 point out of every 16
+    pslcut <- psl.kmeans[,my.seq]
+
+    my.PCA <- princomp(pslcut,cor=FALSE)
+    tot.variance <- head(cumsum(my.PCA$sdev^2/sum(my.PCA$sdev^2)),50) # check how many PCs to keep, based on the cumulative sum of their explained variance
+    n.pcs <- head(as.numeric(which(tot.variance > variance.explained)),1) # keep only the first PCs explaining at least the fraction 'variance.explained' of the total variance (e.g.: 80%)
+    my.cluster <- kmeans(my.PCA$scores[,1:n.pcs], centers=4, iter.max=100, nstart=30) # 4 is the number of clusters
+    rm(pslcut)
+  } else { # 4 is the number of clusters:
+
+    my.cluster <- kmeans(psl.kmeans, centers=4, iter.max=100, nstart=30, trace=FALSE)
+
+    gc()
+
+    #save(my.cluster, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+    #if(fields.name == rean.name){
+    #my.cluster[[p]] <- kmeans(psl.kmeans[which(!is.na(psl.kmeans[,1])),], centers=4, iter.max=100, nstart=30, trace=FALSE)
+    # save the time series output of the cluster analysis:
+    #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_series.RData"))
+    #}
+
+    if(fields.name == ECMWF_S4.name) {
+      my.cluster.array <- array(my.cluster$cluster, c(num.leadtimes, n.years.full, n.members)) # in reverse order compared to the definition of psl.kmeans
+
+      # save the time series output of the cluster analysis:
+      #save(my.cluster, my.cluster.array, my.PCA, tot.variance, n.pcs, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_series.RData"))
+
+      #plot(SMA(my.cluster$centers[1,],201),type="l",col="gold",ylim=c(-20,20));lines(SMA(my.cluster$centers[2,],201),type="l",col="red"); lines(SMA(my.cluster$centers[3,],201),type="l",col="green");lines(SMA(my.cluster$centers[4,],201),type="l",col="blue");lines(SMA(psl.kmeans[29,],201),type="l",col="gray");lines(rep(0,14410),type="l",col="black",lty=2)
+    }
+  }
+
+  rm(psl.kmeans)
+  gc()
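+
+  ## Illustration only (toy data, not used by the pipeline): how the number of retained
+  ## PCs follows from the cumulative explained variance; here 0.8 stands in for the
+  ## 'variance.explained' threshold. Change FALSE to TRUE to run this sketch:
+  if(FALSE){
+    toy <- matrix(rnorm(100*20), 100, 20)
+    toy.pca <- princomp(toy, cor=FALSE)
+    cum.var <- cumsum(toy.pca$sdev^2 / sum(toy.pca$sdev^2))
+    n.keep <- head(which(cum.var > 0.8), 1) # smallest number of PCs explaining >80% of variance
+    print(n.keep)
+  }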
+
+#} # close for on 'p'
+
+#save.image(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+#load(file=paste0(workdir,"/weather_regimes_",fields.name,"_",year.start,"-",year.end,".RData"))
+
+#my.grid<-paste0('r',n.lon,'x',n.lat)
+#coord <- Load(var = 'sfcWind', exp = list(ECMWF_monthly), obs = NULL, sdates=paste0(2013,'0102'), leadtimemin = 1, leadtimemax=1, output = 'lonlat', nprocs=1, grid=my.grid, method='bilinear')
+
+# measure regime anomalies:
+
+#for(p in WR.period){ # 1-12: Jan-Dec; 13-16: Winter-Spring-Summer-Autumn; 17: Year
+
+  if(fields.name == rean.name){
+
+    if(running.cluster && p < 13){
+      # select only the days inside the 3-month cluster series that belong to the target month p:
+      n.days.period <- length(pos.month.extended(1,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended(1,p) == pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      # in this case, we are not selecting days but 6-hourly intervals:
+      if(subdaily) days.month.full <- sort(c(days.month.full*4-3, days.month.full*4-2, days.month.full*4-1, days.month.full*4))
+
+      # select only the days of the cluster series inside the target month p:
+      cluster.sequence <- my.cluster$cluster[days.month.full]
+
+    }
+
+    if(running.15 && p < 13){
+      # select only the days inside the 3-month cluster series that belong to the target month p:
+      n.days.period <- length(pos.month.extended15(1,p)) # total number of days in the three months
+
+      days.month <- days.month.new <- days.month.full <- c()
+      days.month <- which(pos.month.extended15(1,p) == pos.month(1,p)[1]) - 1 + 1:length(pos.month(1,p))
+
+      for(y in 1:n.years){
+        days.month.new <- days.month + n.days.period*(y-1)
+        days.month.full <- c(days.month.full, days.month.new)
+        #print(days.month.new)
+        #print(days.month)
+      }
+
+      # select only the days of the cluster series inside the target month p:
+      cluster.sequence <- my.cluster$cluster[days.month.full]
+
+    }
+
+    if(!running.cluster && !running.15) cluster.sequence <- my.cluster$cluster
+
+    # convert the cluster sequence from 6-hourly to daily:
+    if(subdaily){
+      type <- c()
+
+      for(day in 1:(length(cluster.sequence)/4)){
+        #day <- ceiling(hour/4)
+        hourly <- cluster.sequence[(1+(day-1)*4):(4+(day-1)*4)]
+
+        t1 <- length(which(hourly == 1))
+        t2 <- length(which(hourly == 2))
+        t3 <- length(which(hourly == 3))
+        t4 <- length(which(hourly == 4))
+        tt <- c(t1,t2,t3,t4)
+
+        # if all the 4 time steps belong to the same regime, assign it to this day:
+        if(length(unique(hourly)) == 1) type[day] <- hourly[1]
+
+        # if there are two different regimes, check if one has a higher frequency:
+        if(length(unique(hourly)) == 2){
+          if(any(tt == 3)){ # if 3 of the 4 time intervals belong to the same weather regime, assign this day to it
+            type[day] <- which(tt == 3)
+          } else { # in this case both regimes occur in 2 of the 4 time steps; arbitrarily assign the day to the regime occurring at 12.00
+            type[day] <- hourly[3]
+          }
+        }
+
+        # if there are three different regimes, assign the day to the only regime with 2 time steps:
+        if(length(unique(hourly)) == 3) type[day] <- which(tt == 2)
+
+        # if there are four different regimes (a very rare event!), assign the day to the regime occurring at 12.00:
+        if(length(unique(hourly)) == 4) type[day] <- hourly[3]
+
+      } # close for on day
+
+    } # close if on subdaily
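+
+    ## Illustration only (not used by the pipeline): the same 6-hourly -> daily assignment
+    ## condensed with table(); ties between two regimes (2-2) and days with 4 different
+    ## regimes fall back to the 12.00 time step, as in the loop above:
+    if(FALSE){
+      daily.regime <- function(hourly){
+        tt <- table(factor(hourly, levels=1:4)) # counts of each regime among the 4 time steps
+        if(max(tt) >= 3 || (max(tt) == 2 && sum(tt == 2) == 1)) return(as.integer(names(which.max(tt))))
+        hourly[3] # tie: keep the regime occurring at 12.00
+      }
+      print(daily.regime(c(1,1,2,1))) # 1
+      print(daily.regime(c(1,1,2,2))) # 2 (the 12.00 value)
+    }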
+
+    if(sequences){ # it works only for reanalysis data!
+      # for each of the 4 clusters, remove the days not belonging to sequences of at least 5 days:
+      # warning: the first 4 and the last 4 days of the series are not taken into account by the algorithm and are treated as not belonging to any sequence (sequ=FALSE)
+      wrdiff <- diff(my.cluster$cluster)
+
+      sequ <- rep(FALSE, n.days.period[[p]])
+      for(i in 5:(n.days.period[[p]]-5)){
+        sequ[i] <- TRUE
+        if(wrdiff[i] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i] != 0 & wrdiff[i+4] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-1] != 0 & wrdiff[i+3] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-2] != 0 & wrdiff[i+2] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+        if(wrdiff[i-3] != 0 & wrdiff[i+1] != 0) sequ[i] = FALSE
+        if(wrdiff[i-4] != 0 & wrdiff[i] != 0) sequ[i] = FALSE
+      } # close for loop on i
+
+      # sequence of daily cluster numbers without the days that don't belong to sequences of 5 or more days:
+      cluster.sequence <- my.cluster$cluster * sequ
+      rm(wrdiff, sequ)
+    }
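+
+    ## Illustration only (not used by the pipeline): the >=5-day filter above can be
+    ## cross-checked with rle(); note that, unlike the loop above, this version also
+    ## counts sequences touching the first and last 4 days of the series:
+    if(FALSE){
+      runs <- rle(as.vector(my.cluster$cluster))
+      keep <- rep(runs$lengths >= 5, runs$lengths) # TRUE only inside runs of 5+ identical days
+      print(sum(keep)) # number of days belonging to long sequences
+    }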
+
+    # indices of the days assigned to each regime (if 'sequences' is on, only the days inside sequences of at least 5 days of the same regime are kept):
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    # yearly frequency of each regime:
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+
+    mod.subdaily <- ifelse(subdaily,4,1)
+    np <- n.days.in.a.period(p,1)*mod.subdaily
+
+    for(y in year.start:year.end){
+      # for both reanalysis and S4, the time order is always: year1day1, year1day2, ..., year1day31, year2day1, year2day2, ..., year2day28, etc.
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[(y-year.start)*np + (1:np)] == 4))
+    }
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/np
+    wr2y <- wr2y/np
+    wr3y <- wr3y/np
+    wr4y <- wr4y/np
+
+    gc()
+
+    # measure regime anomalies:
+    pslmat <- unname(acast(psl.melted, Year + Day ~ Lat ~ Lon))
+
+    if(running.cluster || running.15){
+      pslmat.new <- pslmat
+      pslmat <- pslmat.new[days.month.full,,]
+      rm(pslmat.new)
+      gc()
+    }
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    # regime structure:
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, to save for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    # save all the data necessary to redraw the graphs when we know the right regime:
+    save(my.cluster, cluster.sequence, lon, lon.max, lon.min, lat, lat.max, lat.min, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, n.years, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1, wr2, wr3, wr4, LOESS, running.cluster, running.15, lat.weighting, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_psl.RData"))
+
+    ## immediately save the plots of the ERA-Interim monthly regime anomalies with the running cluster, instead of loading them in the next script:
+    EU <- c(1:which(lon >= lon.max)[1], (which(lon >= 337)[1]:length(lon))) # restrict the area to continental Europe only
+
+    if(psl == "g500"){
+      my.brks <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR (in m)
+      my.brks2 <- c(-300,seq(-200,200,10),300) # mean anomaly of a WR for the contour lines
+      values.to.plot <- c(-200, -100, 0, 100, 200) # values we want to show in the labels
+    }
+
+    if(psl == "psl"){
+      my.brks <- c(seq(-19,-1,2),0,seq(1,19,2)) # mean anomaly of a WR (in hPa)
+      my.brks2 <- my.brks #c(seq(-20,20,2)) # mean anomaly of a WR for the contour lines
+      values.to.plot <- my.brks #c(-17,-15,-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13,15,17)
+    }
+    my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--white--red colors
+    my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white"
+
+    map1 <- pslwr1mean
+    map2 <- pslwr2mean
+    map3 <- pslwr3mean
+    map4 <- pslwr4mean
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster1.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map1,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map1), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster2.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map2,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map2), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster3.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map3,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map3), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+    png(filename=paste0(workdir,"/",fields.name,"_",my.period[p],"_cluster4.png"),width=1000,height=1200)
+    PlotEquiMap2(rescale(map4,my.brks[1],tail(my.brks,1)), lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(map4), brks2=my.brks2, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, contours.lty="F1FF1F")
+    dev.off()
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
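+
+  ## Illustration only (not used by the pipeline): the diverging blue-white-red scale
+  ## built above, shown in isolation. Change FALSE to TRUE to run this sketch:
+  if(FALSE){
+    library(RColorBrewer)
+    brks.toy <- c(seq(-19,-1,2), 0, seq(1,19,2)) # 21 breaks -> 20 colour classes
+    cols.toy <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(brks.toy)-1)
+    cols.toy[floor(length(cols.toy)/2) + 0:1] <- "white" # force the two central classes to white
+    barplot(rep(1,length(cols.toy)), col=cols.toy, border=NA) # quick visual check
+  }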
+
+  if(fields.name == ECMWF_S4.name){
+    cat("Computing regime anomalies and frequencies. Please wait...\n")
+
+    #load(file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_ClusterData.RData"))
+
+    cluster.sequence <- my.cluster$cluster #as.vector(my.cluster.array)
+
+    wr1 <- which(cluster.sequence == 1)
+    wr2 <- which(cluster.sequence == 2)
+    wr3 <- which(cluster.sequence == 3)
+    wr4 <- which(cluster.sequence == 4)
+
+    wr1y <- apply(my.cluster.array == 1, 2, sum)
+    wr2y <- apply(my.cluster.array == 2, 2, sum)
+    wr3y <- apply(my.cluster.array == 3, 2, sum)
+    wr4y <- apply(my.cluster.array == 4, 2, sum)
+
+    # convert the counts to relative frequencies:
+    wr1y <- wr1y/(num.leadtimes*n.members) # num.leadtimes is the number of days of the month covered by the cluster time series
+    wr2y <- wr2y/(num.leadtimes*n.members)
+    wr3y <- wr3y/(num.leadtimes*n.members)
+    wr4y <- wr4y/(num.leadtimes*n.members)
+
+    # same as above, but measuring the maximum frequency among all the members:
+    wr1yMax <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,max)
+    wr2yMax <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,max)
+    wr3yMax <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,max)
+    wr4yMax <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,max)
+
+    wr1yMax <- wr1yMax/num.leadtimes
+    wr2yMax <- wr2yMax/num.leadtimes
+    wr3yMax <- wr3yMax/num.leadtimes
+    wr4yMax <- wr4yMax/num.leadtimes
+
+    wr1yMin <- apply(apply(my.cluster.array == 1, c(2,3), sum),1,min)
+    wr2yMin <- apply(apply(my.cluster.array == 2, c(2,3), sum),1,min)
+    wr3yMin <- apply(apply(my.cluster.array == 3, c(2,3), sum),1,min)
+    wr4yMin <- apply(apply(my.cluster.array == 4, c(2,3), sum),1,min)
+
+    wr1yMin <- wr1yMin/num.leadtimes
+    wr2yMin <- wr2yMin/num.leadtimes
+    wr3yMin <- wr3yMin/num.leadtimes
+    wr4yMin <- wr4yMin/num.leadtimes
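+
+    ## Illustration only (toy sizes, not used by the pipeline): how the apply() calls
+    ## above count regime days; my.cluster.array is [leadtime, year, member], so
+    ## summing a logical array over every dimension except the 2nd gives one count
+    ## per start year. Change FALSE to TRUE to run this sketch:
+    if(FALSE){
+      toy <- array(sample(1:4, 30*3*5, replace=TRUE), c(30,3,5)) # toy [day, year, member]
+      freq1 <- apply(toy == 1, 2, sum) / (30*5) # mean frequency of regime 1 in each year
+      max1 <- apply(apply(toy == 1, c(2,3), sum), 1, max) / 30 # highest member frequency per year
+      print(rbind(freq1, max1))
+    }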
+
+    cat("Computing regime anomalies and frequencies. Please wait......\n")
+
+    # in this case, we must use psl.melted instead of psleuFull; both are already in anomalies
+    # (psl.melted depends on the startdate and leadtime, psleuFull doesn't):
+    gc()
+    pslmat <- unname(acast(psl.melted, Member + StartYear + LeadDay ~ Lat ~ Lon))
+    #pslmat <- unname(acast(melt(pslPeriod[1,1:2,,,,, drop=FALSE],varnames=c("Exp","Member","StartYear","LeadDay","Lat","Lon")), Member ~ StartYear + LeadDay ~ Lat ~ Lon))
+    gc()
+
+    #for(a in 1:2){
+    #  for(b in 1:3){
+    #    for(c in 1:4){
+    #      pslmat[1, a + (b-1)*a + (c-1)*a*b,,] <- pslPeriod[1,a,b,c,,]
+    #    }
+    #  }
+    #}
+
+    pslwr1 <- pslmat[wr1,,, drop=F]
+    pslwr2 <- pslmat[wr2,,, drop=F]
+    pslwr3 <- pslmat[wr3,,, drop=F]
+    pslwr4 <- pslmat[wr4,,, drop=F]
+
+    pslwr1mean <- apply(pslwr1, c(2,3), mean, na.rm=T)
+    pslwr2mean <- apply(pslwr2, c(2,3), mean, na.rm=T)
+    pslwr3mean <- apply(pslwr3, c(2,3), mean, na.rm=T)
+    pslwr4mean <- apply(pslwr4, c(2,3), mean, na.rm=T)
+
+    # spatial mean for each day, to save for the measure of the FairRPSS:
+    pslwr1spmean <- apply(pslwr1, 1, mean, na.rm=T)
+    pslwr2spmean <- apply(pslwr2, 1, mean, na.rm=T)
+    pslwr3spmean <- apply(pslwr3, 1, mean, na.rm=T)
+    pslwr4spmean <- apply(pslwr4, 1, mean, na.rm=T)
+
+    rm(pslmat)
+    gc()
+
+    # save all the data necessary to draw the graphs (but not the impact maps):
+    save(my.cluster, cluster.sequence, my.cluster.array, lon, lon.max, lat, pos.lat, pos.lon, n.pos.lat, n.pos.lon, psl, psl.name, my.period, p, workdir, rean.name, fields.name, year.start, year.end, years, n.years, n.years.full, WR.period, pslwr1mean, pslwr2mean, pslwr3mean, pslwr4mean, pslwr1spmean, pslwr2spmean, pslwr3spmean, pslwr4spmean, wr1y, wr2y, wr3y, wr4y, wr1yMax, wr2yMax, wr3yMax, wr4yMax, wr1yMin, wr2yMin, wr3yMin, wr4yMin, wr1, wr2, wr3, wr4, n.leadtimes, num.leadtimes, LOESS, running.cluster, running.15, lat.weighting, file=paste0(workdir,"/",fields.name,"_",my.period[p],"_leadmonth_",lead.month,"_psl.RData"))
+
+    rm(pslwr1mean,pslwr2mean,pslwr3mean,pslwr4mean)
+    rm(pslwr1,pslwr2,pslwr3,pslwr4)
+    gc()
+
+  }
+
+  # for the monthly system, the time order is always: year1day1, year2day1, ..., yearNday1, year1day2, year2day2, ..., yearNday2, etc.:
+  if(fields.name == ECMWF_monthly.name){
+    my.startdates.period <- months.period(forecast.year,mes,day,p)
+
+    #if(p == 13) my.startdates.period <- my.startdates.period[-2] # remove the second startdate because it is broken
+
+    days.period <- period.length <- list()
+    for(startdate in my.startdates.period){
+      days.period[[p]] <- c(days.period[[p]], (1+(startdate-1)*(year.end-year.start+1)):(startdate*(year.end-year.start+1)))
+    }
+    period.length[[p]] <- length(my.startdates.period) # number of Thursdays in the period
+
+    wr1y <- wr2y <- wr3y <- wr4y <- c()
+    for(y in year.start:year.end){
+      wr1y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 1))
+      wr2y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 2))
+      wr3y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 3))
+      wr4y[y-year.start+1] <- length(which(cluster.sequence[seq((y-year.start+1),length(cluster.sequence), n.years)] == 4))
+    }
+  }
+
+  cat("Finished!\n")
+} # close the for loop on 'p'
+
+
+teleconexion <- FALSE
+if(teleconexion){
+
+  ## override the years set in the script header:
+  year.start <- 1981
+  year.end <- 2015 #2016
+
+  n.years <- year.end - year.start + 1
+
+  ## load monthly psl:
+  pslFull <- Load(var = psl, exp = NULL, obs = list(list(path=fields)), paste0(year.start:year.end,'0101'), storefreq = 'monthly', leadtimemax = 12, output = 'lonlat', latmin = lat.min, latmax = lat.max, lonmin = lon.min, lonmax = lon.max, nprocs=1)
+
+  if(psl == "psl") pslFull$obs <- pslFull$obs/100 # convert MSLP in Pascal to MSLP in hPa
+
+  pslFullClim <- apply(pslFull$obs, c(1,2,4,5,6), mean, na.rm=T)
+
+  pslFullSd <- apply(pslFull$obs, c(1,2,4,5,6), sd, na.rm=T)
+
+  pslFullClim2 <- InsertDim(pslFullClim,3,n.years)
+  pslFullSd2 <- InsertDim(pslFullSd,3,n.years)
+
+  ## monthly standardized anomalies:
+  pslAnom <- ( pslFull$obs - pslFullClim2) / pslFullSd2
+  pslAnom <- pslAnom[1,1,,,,]
+
+  ##pslAnom.melted <- melt(pslAnom[,,,, drop=FALSE], varnames=c("Year","Month","Lat","Lon"))
+  ##pslAnom2 <- unname(acast(pslAnom.melted, Year + Month ~ Lat ~ Lon)) # order: year1 month1, year1 month2, etc.
+  gc()
+
+  ## load the monthly time series of the teleconnection indices:
+  NAO <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/NAO.txt", header=FALSE)
+  EA <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/EA.txt", header=FALSE)
+  EAWR <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/EAWR.txt", header=FALSE)
+  SCAND <- read.table(file="/esarchive/scratch/ncortesi/BSC/RESILIENCE/Regimes/series_teleconnexion_CPC/SCAND.txt", header=FALSE)
+
+  names(NAO) <- names(EA) <- names(EAWR) <- names(SCAND) <- c("year","month","index")
+
+  # select only the teleconnection values falling in the chosen years:
+  row.start1 <- which(NAO[,1] == year.start)[1]
+  row.end1 <- which(NAO[,1] == year.end)[length(which(NAO[,1] == year.end))]
+  row.start2 <- which(EA[,1] == year.start)[1]
+  row.end2 <- which(EA[,1] == year.end)[length(which(EA[,1] == year.end))]
+  row.start3 <- which(EAWR[,1] == year.start)[1]
+  row.end3 <- which(EAWR[,1] == year.end)[length(which(EAWR[,1] == year.end))]
+  row.start4 <- which(SCAND[,1] == year.start)[1]
+  row.end4 <- which(SCAND[,1] == year.end)[length(which(SCAND[,1] == year.end))]
+
+  NAOy <- NAO[row.start1:row.end1,]
+  EAy <- EA[row.start2:row.end2,]
+  EAWRy <- EAWR[row.start3:row.end3,]
+  SCANDy <- SCAND[row.start4:row.end4,]
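+
+  ## Illustration only (not used by the pipeline): the row-range selection above can be
+  ## cross-checked with a plain logical filter on the 'year' column:
+  if(FALSE){
+    sel.years <- function(tab) tab[tab$year >= year.start & tab$year <= year.end, ]
+    stopifnot(identical(sel.years(NAO), NAOy)) # should match the index-based selection
+  }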
+
+  ## month <- 1:12
+  ## for(m in month){
+  ##   # select only teleconnection data in month m:
+  ##   rows <- which(NAOy[,2] == m)
+
+  ##   NAOm <- NAOy[rows,]
+  ##   EAm <- EAy[rows,]
+  ##   EAWRm <- EAWRy[rows,]
+  ##   SCANDm <- SCANDy[rows,]
+  ## }
+
+  NAO.map <- EA.map <- EAWR.map <- SCAND.map <- NAO.map.corr <- EA.map.corr <- EAWR.map.corr <- SCAND.map.corr <- array(NA, c(12,dim(pslAnom)[3:4]))
+
+  ## create the teleconnection maps:
+
+  month <- 1:12
+  for(m in month){
+    #NAO.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * NAOy[which(NAOy[,2] == m),3]))
+    #EA.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * EAy[which(EAy[,2] == m),3]))
+    #EAWR.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * EAWRy[which(EAWRy[,2] == m),3]))
+    #SCAND.map[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) mean(x * SCANDy[which(SCANDy[,2] == m),3]))
+
+    NAO.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, NAOy[which(NAOy[,2] == m),3], use = "na.or.complete"))
+    EA.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, EAy[which(EAy[,2] == m),3], use = "na.or.complete"))
+    EAWR.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, EAWRy[which(EAWRy[,2] == m),3], use = "na.or.complete"))
+    SCAND.map.corr[m,,] <- apply(pslAnom[,m,,], c(2,3), function(x) cor(x, SCANDy[which(SCANDy[,2] == m),3], use = "na.or.complete"))
+  }
+
+  ## NAO.map <- apply(pslAnom2, c(2,3), function(x) mean(x*NAOy[,3]))
+  ## NAO.map.corr <- apply(pslAnom2, c(2,3), function(x) cor(x,NAOy[,3]))
+
+  #my.brks <- c(-100,seq(-1,1,0.1),100)
+  #my.cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks)-1) # blue--red colors
+  #my.cols[floor(length(my.cols)/2)] <- my.cols[floor(length(my.cols)/2)+1] <- "white" ## add white in the middle
+
+  my.brks.corr <- c(-1, -0.6, -0.45, -0.3, -0.15, 0, 0.15, 0.30, 0.45, 0.60, 1) #seq(-1,1,0.1)
+  my.cols.corr <- colorRampPalette(rev(brewer.pal(11,"RdBu")))(length(my.brks.corr)-1) # blue--red colors
+  my.cols.corr[floor(length(my.cols.corr)/2)] <- my.cols.corr[floor(length(my.cols.corr)/2)+1] <- "white" ## add white in the middle
+
+  fileoutput <- paste0(workdir,"/",fields.name,"_teleconexions.png")
+
+  png(filename=fileoutput,width=15000,height=3700)
+
+  plot.new()
+
+  n.map <- 0
+  month <- c(9:12, 1:8)
+  for(m in month){
+    n.map <- n.map+1
+    y1 <- 0.10
+    y3 <- 0.315
+    y5 <- 0.53
+    y7 <- 0.745
+    y.width <- 0.18
+
+    y2 <- y1 + y.width; y4 <- y3 + y.width; y6 <- y5 + y.width; y8 <- y7 + y.width
+    yt1 <- y2+0.003; yt3 <- y4+0.003; yt5 <- y6+0.003; yt7 <- y8+0.003
+    yt2 <- yt1 + 0.004; yt4 <- yt3 + 0.005; yt6 <- yt5 + 0.005; yt8 <- yt7 + 0.005
+
+    ## ## Teleconnection maps:
+    ## map.xpos <- 0.00
+    ## map.width <- 0.46
+    ## par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE)
+    ## PlotEquiMap2(NAO.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(NAO.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+    ## par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE)
+    ## PlotEquiMap2(EA.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(EA.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+    ## par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE)
+    ## PlotEquiMap2(EAWR.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(EAWR.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+    ## par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE)
+    ## PlotEquiMap2(SCAND.map[m,,], lon, lat, filled.continents=FALSE, brks=my.brks, cols=my.cols, sizetit=1.2, contours=t(SCAND.map[m,,]), brks2=my.brks, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+
+    ## Correlation maps:
+
+    map.width <- 0.08
+    map.xpos <- 0.03 + map.width * (n.map-1)
+    par(fig=c(map.xpos + 0.003, map.xpos + map.width - 0.003, y7, y8), new=TRUE)
+    PlotEquiMap2(NAO.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(NAO.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+    par(fig=c(map.xpos, map.xpos + map.width, y5, y6), new=TRUE)
+    PlotEquiMap2(EA.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(EA.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+    par(fig=c(map.xpos, map.xpos + map.width, y3, y4), new=TRUE)
+    PlotEquiMap2(EAWR.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(EAWR.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+    par(fig=c(map.xpos, map.xpos + map.width, y1, y2), new=TRUE)
+    PlotEquiMap2(SCAND.map.corr[m,,], lon, lat, filled.continents=FALSE, brks=my.brks.corr, cols=my.cols.corr, sizetit=1.2, contours=t(SCAND.map.corr[m,,]), brks2=my.brks.corr, intxlon=10, intylat=10, drawleg=F, cex.lab=1.5, xlabel.dist=1.5, contours.lty="F1FF1F", continents.col="black")
+
+    ## titles of the monthly map columns:
+    title1.width <- 0.08
+    title1.xpos <- 0.03 + (n.map-1) * title1.width
+
+    par(fig=c(title1.xpos, title1.xpos + title1.width, 0.9, 0.95), new=TRUE)
+    mtext(my.period[m], font=2, cex=8)
+
+  } ## close for on month m
+
+  ## Legend:
+  legend.xpos <- 0.3
+  legend.width <- 0.4
+  legend.cex <- 6
+
+  par(fig=c(legend.xpos, legend.xpos + legend.width, 0.035, 0.095), new=TRUE)
+  ColorBar(brks=round(my.brks.corr,2), cols=my.cols.corr, vert=FALSE, label_scale=6, subsample=1, triangle_ends=c(FALSE,FALSE))
+  ##mtext(side=4,paste0(" ",var.unit[var.num]), cex=legend2.cex, las=1)
+  ##mtext(side=4,"cor", cex=legend2.cex, las=1)
+
+  yt1 <- yt1 - 0.1
+  yt3 <- yt3 - 0.1
+  yt5 <- yt5 - 0.13
+  yt7 <- yt7 - 0.14
+  par(fig=c(0, 0.03, yt7+0.0025, yt7+0.0055), new=TRUE)
+  mtext("NAO", font=2, cex=10)
+  par(fig=c(0, 0.03, yt5+0.0025, yt5+0.0055), new=TRUE)
+  mtext("EA", font=2, cex=10)
+  par(fig=c(0, 0.03, yt3+0.0025, yt3+0.0055), new=TRUE)
+  mtext("EAWR", font=2, cex=10)
+  par(fig=c(0, 0.03, yt1+0.0025, yt1+0.0055), new=TRUE)
+  mtext("SCAND", font=2, cex=10)
+
+  dev.off()
+
+} # close if on teleconexion